package com.edata.bigdata.spark.streaming;

import com.edata.bigdata.basic.Commons;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.streaming.DataStreamWriter;
import org.apache.spark.sql.streaming.OutputMode;
import org.apache.spark.sql.streaming.Trigger;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.Properties;

public class StreamSink {

    // Kept public and non-static for backward compatibility with existing callers.
    public Logger logger = LoggerFactory.getLogger(this.getClass());

    /** Supported streaming sink targets. */
    public enum SinkType {
        PARQUET, JSON, CSV, ORC, TEXT,
        KAFKA, CONSOLE, MEMORY,
        JDBC
    }

    /**
     * Builds a {@link DataStreamWriter} for the given streaming Dataset, configured
     * for the requested sink type.
     *
     * <p>Recognized keys in {@code options} (depending on {@code sinkType}):
     * {@code checkpoint.location}, {@code path} (file sinks),
     * {@code kafka.bootstrap.servers} / {@code topic} (Kafka sink; the legacy
     * {@code subscribe} key is still honored as a fallback),
     * {@code query.name} (memory sink), and the {@code jdbc.*} keys (JDBC sink).
     *
     * @param stream      the streaming Dataset to write
     * @param sinkType    target sink; an unsupported type is logged and the writer
     *                    is returned unconfigured (original behavior preserved)
     * @param outputMode  Spark output mode (append/update/complete)
     * @param triggerTime processing-time trigger interval (e.g. "10 seconds");
     *                    null/blank means the default trigger
     * @param options     sink configuration properties, see above
     * @param clazz       row class, used by the JDBC sink to build the upsert SQL
     *                    and bind statement parameters
     * @return the configured writer; call {@code start()} on it to begin the query
     */
    public <T> DataStreamWriter<T> createDataStreamWriter(Dataset<T> stream,
                                                          SinkType sinkType,
                                                          OutputMode outputMode,
                                                          String triggerTime,
                                                          Properties options,
                                                          Class<T> clazz) {
        DataStreamWriter<T> writer = stream.writeStream();

        // Only set the checkpoint location when configured: passing a null option
        // value into Spark's option map throws at runtime.
        String checkpointLocation = options.getProperty("checkpoint.location");
        if (checkpointLocation != null) {
            writer = writer.option("checkpointLocation", checkpointLocation);
        }

        if (triggerTime != null && !triggerTime.isBlank()) {
            writer = writer.trigger(Trigger.ProcessingTime(triggerTime));
        }
        switch (sinkType) {
            case PARQUET:
            case JSON:
            case CSV:
            case ORC:
            case TEXT:
                // All file-based sinks share the same shape: a format name plus an
                // output path. (TEXT was declared in SinkType but previously fell
                // into the unsupported default.)
                writer.outputMode(outputMode)
                        .format(fileFormatName(sinkType))
                        .option("path", options.getProperty("path"));
                break;
            case KAFKA: {
                // The Kafka *sink* takes a "topic" option; "subscribe" is a
                // source-side option and is ignored when writing. Fall back to the
                // legacy "subscribe" key so existing configurations keep working.
                String topic = options.getProperty("topic", options.getProperty("subscribe"));
                writer.outputMode(outputMode)
                        .format("kafka")
                        .option("kafka.bootstrap.servers", options.getProperty("kafka.bootstrap.servers"))
                        .option("topic", topic);
                break;
            }
            case CONSOLE:
                writer.outputMode(outputMode)
                        .format("console")
                        .option("truncate", false)
                        .option("numRows", 50);
                break;
            case MEMORY:
                // The memory sink exposes results as an in-memory table named after
                // the query. NOTE(review): assumes callers supply "query.name" —
                // confirm the key against the configuration source.
                writer.outputMode(outputMode)
                        .format("memory")
                        .queryName(options.getProperty("query.name"));
                break;
            case JDBC:
                writer.foreachBatch((Dataset<T> batch, Long batchId) -> {
                    // This lambda runs on the driver once per micro-batch; the
                    // partition lambda below is serialized and shipped to executors.
                    Logger batchLogger = LoggerFactory.getLogger(StreamSink.class);
                    try {
                        String sql = Commons.createUnPreparedUpsertSQLByClazz(
                                options.getProperty("jdbc.upsert.conflict.field.name"),
                                clazz
                        );
                        batch.foreachPartition(iter -> {
                            // Obtain the logger on the executor: SLF4J Loggers are
                            // not Serializable, so capturing one from the driver
                            // closure would fail at task serialization time.
                            Logger partitionLogger = LoggerFactory.getLogger(StreamSink.class);
                            final int commitBatchSize = 1000; // commit every 1000 rows
                            // try-with-resources guarantees the connection and
                            // statement are closed even when a row fails — the
                            // original leaked both on every code path.
                            try (Connection conn = DriverManager.getConnection(
                                    options.getProperty("jdbc.url"),
                                    options.getProperty("jdbc.user"),
                                    options.getProperty("jdbc.password"));
                                 PreparedStatement stmt = conn.prepareStatement(sql)) {
                                conn.setAutoCommit(false);
                                int size = 0;
                                while (iter.hasNext()) {
                                    T record = iter.next();
                                    Commons.setPreparedStatementParameters(stmt, record, clazz);
                                    stmt.addBatch();
                                    size++;
                                    if (size % commitBatchSize == 0) {
                                        stmt.executeBatch();
                                        conn.commit();
                                        partitionLogger.info("本分区已保存部分数据，数据量：{}", size);
                                        stmt.clearBatch();
                                    }
                                }
                                // Flush the final partial batch, if any.
                                if (size % commitBatchSize != 0) {
                                    stmt.executeBatch();
                                    conn.commit();
                                    partitionLogger.info("本分区最后一批数据已保存：数据量：{}", size);
                                }
                                partitionLogger.info("本分区数据已全部插入");
                            }
                        });
                    } catch (Exception e) {
                        // Log the full stack trace — the original logged only
                        // e.getMessage(), discarding the cause. NOTE(review): the
                        // batch is still swallowed (original behavior); consider
                        // rethrowing so Spark fails/retries instead of dropping data.
                        batchLogger.error("无法插入或更新数据：{}", e.getMessage(), e);
                    }
                });
                break;
            default:
                logger.error("不支持该Sink类型：{}", sinkType);
        }
        return writer;
    }

    /** Maps a file-based sink type to its Spark data-source format name. */
    private static String fileFormatName(SinkType sinkType) {
        switch (sinkType) {
            case PARQUET: return "parquet";
            case JSON:    return "json";
            case CSV:     return "csv";
            case ORC:     return "orc";
            case TEXT:    return "text";
            default:
                throw new IllegalArgumentException("Not a file sink: " + sinkType);
        }
    }
}
