package com.edata.bigdata.spark;

import com.edata.bigdata.basic.Commons;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.util.List;
import java.util.Properties;

public class PgWriter {

    // Fields kept public (and logger non-static) for backward compatibility
    // with existing callers that assign `connector`/`session` directly.
    public Logger logger = LoggerFactory.getLogger(this.getClass());
    public PgConnector connector;
    public SparkSession session;

    /** Number of rows buffered per JDBC batch before executing and committing. */
    private static final int COMMIT_BATCH_SIZE = 1000;

    /**
     * Appends the given rows to the PostgreSQL table derived from the class name.
     *
     * @param rows  rows to insert
     * @param clazz entity class; its lower-cased simple name is the target table,
     *              and its fields define the DataFrame schema
     * @param <T>   entity type
     */
    public <T> void insert(List<Row> rows, Class<T> clazz) {
        StructType schema = Commons.createDataFrameSchema(clazz);
        Dataset<Row> data = session.createDataFrame(rows, schema);
        appendViaJdbc(data, clazz);
    }

    /**
     * Appends a typed Dataset to the PostgreSQL table derived from the class name.
     *
     * @param data  dataset to insert
     * @param clazz entity class; its lower-cased simple name is the target table
     * @param <T>   entity type
     */
    public <T> void insert(Dataset<T> data, Class<T> clazz) {
        appendViaJdbc(data, clazz);
    }

    /** Shared append path for both insert overloads (mode "append" via Spark JDBC). */
    private void appendViaJdbc(Dataset<?> data, Class<?> clazz) {
        String tableName = clazz.getSimpleName().toLowerCase();
        Properties props = new Properties();
        props.setProperty("user", connector.JDBC_USER);
        props.setProperty("password", connector.JDBC_PASSWORD);
        logger.info("正在写入数据 {}", tableName);
        data.write()
                .mode("append")
                .jdbc(connector.getURL(), tableName, props);
    }

    /**
     * Upserts the dataset into PostgreSQL using an ON CONFLICT statement built by
     * {@link Commons}. Each Spark partition opens its own connection and commits
     * in batches of {@value #COMMIT_BATCH_SIZE} rows.
     *
     * @param data  dataset to upsert
     * @param clazz entity class used to build the SQL and bind parameters
     * @param props must contain "jdbc.upsert.conflict.field.name" — the conflict column
     * @param <T>   entity type
     */
    public <T> void upsert(Dataset<T> data, Class<T> clazz, Properties props) {
        try {
            String sql = Commons.createUnPreparedUpsertSQLByClazz(
                    props.getProperty("jdbc.upsert.conflict.field.name"),
                    clazz
            );

            // Capture connection settings into locals so the closure below does
            // not capture `this` — PgWriter holds a SparkSession and a Logger,
            // which would make the task non-serializable on executors.
            String url = connector.getURL();
            String user = connector.getJDBC_USER();
            String password = connector.getJDBC_PASSWORD();

            data.foreachPartition(iter -> {
                // Executor-side logger: looked up locally instead of capturing
                // the non-serializable instance field.
                Logger partitionLogger = LoggerFactory.getLogger(PgWriter.class);
                // try-with-resources guarantees the connection and statement are
                // closed even when a batch fails (the original leaked both).
                try (Connection conn = DriverManager.getConnection(url, user, password);
                     PreparedStatement stmt = conn.prepareStatement(sql)) {
                    conn.setAutoCommit(false);
                    int size = 0;
                    while (iter.hasNext()) {
                        T record = iter.next();
                        Commons.setPreparedStatementParameters(stmt, record, clazz);
                        stmt.addBatch();
                        size++;
                        if (size % COMMIT_BATCH_SIZE == 0) {
                            // executeBatch clears the batch per the JDBC spec,
                            // so no explicit clearBatch() is needed.
                            stmt.executeBatch();
                            conn.commit();
                            partitionLogger.info("本分区已保存部分数据，数据量：{}", size);
                        }
                    }
                    // Flush the trailing partial batch (skipped when size is an
                    // exact multiple of the batch size, including size == 0).
                    if (size % COMMIT_BATCH_SIZE != 0) {
                        stmt.executeBatch();
                        conn.commit();
                        partitionLogger.info("本分区最后一批数据已保存：数据量：{}", size);
                        partitionLogger.info("本分区数据已全部插入");
                    }
                }
            });
        } catch (Exception e) {
            // Pass the throwable so the full stack trace is logged, not just the message.
            logger.error("无法插入或更新数据：{}", e.getMessage(), e);
        }
    }

    /**
     * Creates a writer bound to the given Spark session. The {@code connector}
     * field must be assigned before any write method is called.
     *
     * @param session active Spark session used to build DataFrames
     */
    public PgWriter(SparkSession session) {
        this.session = session;
    }
}
