package com.edata.bigdata.spark.streaming;

import com.edata.bigdata.basic.Commons;
import org.apache.spark.sql.*;

import static org.apache.spark.sql.functions.*;

import org.apache.spark.sql.streaming.DataStreamReader;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Properties;


public class StreamSource {
    public Logger logger = LoggerFactory.getLogger(this.getClass());
    public SparkSession sparkSession;

    /**
     * Supported streaming source types.
     * FILE_BOUNDED and FILE_CONTINUOUS are declared but not yet handled by
     * {@link #createDataStreamReader} — they fall through to the error branch.
     */
    public enum SourceType {
        PARQUET, JSON, CSV, ORC, TEXT, FILE_BOUNDED, FILE_CONTINUOUS, KAFKA
    }

    /*
     * When creating a streaming read over a file-based source, the files being
     * read must not be modified or overwritten while the stream is running.
     * */

    /**
     * Creates a {@link DataStreamReader} configured for the given source type.
     *
     * @param type    the source type to read from
     * @param options source-specific settings: {@code "path"} for file sources;
     *                Kafka connection/subscription properties for {@code KAFKA}
     * @return a configured reader, or {@code null} if the type is unsupported
     */
    public DataStreamReader createDataStreamReader(SourceType type,
                                                   Properties options) {
        DataStreamReader reader = sparkSession.readStream();
        // Tracks whether the selected format is file-based; file-source-only
        // options are applied after the switch.
        boolean isFileSource = true;
        switch (type) {
            case PARQUET:
                reader.format("parquet");
                reader.option("path", options.getProperty("path"));
                break;
            case JSON:
                reader.format("json");
                reader.option("path", options.getProperty("path"));
                // A single JSON record may span multiple lines.
                reader.option("multiLine", true);
                break;
            case CSV:
                reader.format("csv");
                reader.option("path", options.getProperty("path"));
                break;
            case ORC:
                reader.format("orc");
                reader.option("path", options.getProperty("path"));
                break;
            case TEXT:
                reader.format("text");
                reader.option("path", options.getProperty("path"));
                break;
            case KAFKA:
                isFileSource = false;
                reader.format("kafka");
                reader.option("kafka.bootstrap.servers", options.getProperty("kafka.bootstrap.servers"));
                reader.option("subscribe", options.getProperty("subscribe"));
                // NOTE(review): Spark's Kafka source ignores the unprefixed "group.id"
                // option; Kafka consumer configs must carry the "kafka." prefix
                // ("kafka.group.id", supported since Spark 3.0, with caveats).
                // Confirm intent before changing.
                reader.option("group.id", options.getProperty("group.id"));
                reader.option("startingOffsets", options.getProperty("startingOffsets"));
                // Keep the query running when offsets are lost (e.g. expired/deleted
                // topic data) instead of failing the stream.
                reader.option("failOnDataLoss", false);
                reader.option("kafka.session.timeout.ms", options.getProperty("kafka.session.timeout.ms"));
                break;
            default:
                logger.error("不支持为该类型 {} 数据源创建流式读取", type);
                return null;
        }
        // BUGFIX: these options only make sense for file-based sources; previously
        // they were also applied to the Kafka source, where they have no effect.
        if (isFileSource) {
            // Throttle file sources to avoid an ingest avalanche on the first trigger.
            reader.option("maxFilesPerTrigger", 100);
            // Archive already-processed source files (available since Spark 3.0).
            reader.option("cleanSource", "archive");
        }
        return reader;
    }

    /**
     * Projects a raw stream onto ({@code data}, {@code event_time}), applies an
     * optional event-time watermark, and appends a {@code window} column.
     *
     * @param data           input stream; must contain {@code timestamp} and
     *                       {@code value} columns (e.g. a Kafka source)
     * @param windowDuration window length, e.g. {@code "10 minutes"}
     * @param slideDuration  slide interval; when {@code null}, a tumbling window
     *                       (slide == window) is used
     * @param watermarkDelay max event lateness, e.g. {@code "5 minutes"}; when
     *                       {@code null} or blank, no watermark is applied
     * @return the transformed stream with columns {@code data}, {@code event_time}
     *         and {@code window}
     */
    public Dataset<Row> applyWindowAndWatermark(Dataset<Row> data,
                                                String windowDuration,
                                                String slideDuration,
                                                String watermarkDelay) {
        // Cast the "timestamp" column to a real timestamp named "event_time",
        // decode the "value" column to a string named "data", and drop all
        // other columns.
        Dataset<Row> windowed = data
                .withColumn("event_time", col("timestamp").cast("timestamp"))
                .withColumn("data", col("value").cast("string"))
                .select("data", "event_time");

        // Watermark is optional: skip it entirely when no delay is configured.
        if (watermarkDelay != null && !watermarkDelay.isBlank()) {
            windowed = windowed.withWatermark("event_time", watermarkDelay);
        }

        // Missing slide duration => tumbling window (slide equals window length).
        Column windowCol = window(
                col("event_time"),
                windowDuration,
                slideDuration != null ? slideDuration : windowDuration
        );
        return windowed.withColumn("window", windowCol);
    }

    /**
     * Maps a {@code Dataset<Row>} whose {@code data} column holds JSON strings
     * into a typed {@code Dataset<T>} using a schema derived from {@code clazz}.
     *
     * @param rows  input rows containing a JSON string column named {@code data}
     * @param clazz target bean class used to derive the schema and encoder
     * @param <T>   target bean type
     * @return the typed dataset, or {@code null} when either argument is null
     */
    public <T> Dataset<T> toBeanMapper(Dataset<Row> rows, Class<T> clazz) {
        if (rows == null) {
            logger.error("rows为空");
            return null;
        }
        if (clazz == null) {
            logger.error("clazz为空");
            return null;
        }
        logger.info("正在从数据类型 Dataset<Row> 映射为 Dataset<{}>", clazz.getSimpleName());
        StructType schema = Commons.createDataFrameSchema(clazz);
        // Parse each JSON string against the derived schema, then flatten the
        // struct so its fields become top-level columns.
        Dataset<Row> dataStream = rows.select(from_json(col("data"), schema).as("data")).select("data.*");
        return dataStream.as(Encoders.bean(clazz));
    }

    /**
     * @param sparkSession the session used to create streaming readers
     */
    public StreamSource(SparkSession sparkSession) {
        this.sparkSession = sparkSession;
    }
}
