package com.wyt.spark.clickhouse;

import com.alibaba.fastjson.JSONObject;
import com.github.housepower.jdbc.ClickHouseConnection;
import com.github.housepower.jdbc.settings.ClickHouseConfig;
import com.wyt.spark.clickhouse.bean.FreemarkCreateTableBean;
import freemarker.template.Configuration;
import freemarker.template.Template;
import freemarker.template.Version;
import org.apache.spark.api.java.function.ForeachPartitionFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Serializable;

import java.io.File;
import java.io.StringWriter;
import java.io.Writer;
import java.sql.*;
import java.time.Duration;
import java.util.Iterator;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * @ClassName ClickHouseJdbcUtils
 * @Description: 封装clickhouse的jdbc调用
 * @Author wangyongtao
 * @Date 2021/7/22 10:54
 * @Version 1.0
 **/
public class ClickHouseJdbcUtils implements Serializable {

    // NOTE: `transient` is meaningless on a static field (static fields are never serialized),
    // so it was dropped; `static` already keeps the logger out of Spark closure serialization.
    private static final Logger logger = LoggerFactory.getLogger(ClickHouseJdbcUtils.class);


    /**
     * 1. Obtain a ClickHouse connection via the housepower native (TCP, port 9000) driver.
     * <p>
     * The charset option is intentionally not exposed here because it caused Spark
     * serialization problems; revisit if needed.
     *
     * @param ckUrl          JDBC url; must include the database, since the driver parses
     *                       host/port/database out of it
     * @param username       user name
     * @param password       password
     * @param tcpKeepAlive   whether to enable TCP keep-alive (the driver also supports an
     *                       HTTP mode on port 8123, which performs worse)
     * @param queryTimeout   query timeout
     * @param connectTimeout connect timeout
     * @return an open connection, or {@code null} if the connection attempt failed
     *         (callers MUST null-check)
     * @author wangyongtao
     * @date 2021/7/22 11:20
     */
    public static Connection getCkConnection(String ckUrl, String username, String password, boolean tcpKeepAlive,
                                             Duration queryTimeout, Duration connectTimeout) {
        try {
            ClickHouseConfig config = ClickHouseConfig.Builder.builder()
                    .withJdbcUrl(ckUrl)
                    .user(username)
                    .password(password)
                    .tcpKeepAlive(tcpKeepAlive)
                    .queryTimeout(queryTimeout)
                    .connectTimeout(connectTimeout)
                    .build();
            return ClickHouseConnection.createClickHouseConnection(config);
        } catch (Exception e) {
            logger.error("获取ck连接异常，异常信息为：", e);
        }
        return null;
    }

    /**
     * 2. Close a ClickHouse connection. Unnecessary when the connection was opened in a
     * try-with-resources block.
     *
     * @param connection the connection to close (may be {@code null}, treated as success)
     * @return {@code true} if the connection is closed (or was {@code null}),
     *         {@code false} if closing threw
     * @author wangyongtao
     * @date 2021/7/22 11:29
     */
    public static boolean closeCkConnection(Connection connection) {
        if (connection != null) {
            try {
                connection.close();
                return true;
            } catch (SQLException throwables) {
                // Fix: previously the exception was swallowed; log it so the cause is preserved.
                logger.error("clickhouse 连接关闭异常", throwables);
            }
            return false;
        } else {
            return true;
        }
    }

    /**
     * 3. Execute a DELETE statement against ClickHouse on an existing connection.
     *
     * @param connection open ClickHouse connection
     * @param deleteSql  the DELETE statement to run
     * @return the driver's {@link Statement#execute} result; {@code false} on error
     * @author wangyongtao
     * @date 2021/7/22 11:32
     */
    public static boolean deleteCkTableByJdbc(Connection connection, String deleteSql) {
        logger.info("执行clickhouse的delete语句：[{}]", deleteSql);
        try (Statement statement = connection.createStatement()) {
            return statement.execute(deleteSql);
        } catch (Exception e) {
            logger.error("clickhouse的delete语句执行异常，deleteSql=[{}],异常信息为：", deleteSql, e);
            return false;
        }
    }

    /**
     * 3b. Execute a ClickHouse DELETE statement over a plain JDBC connection obtained
     * from {@link DriverManager}.
     *
     * @param sql   the DELETE statement to run
     * @param ckUrl JDBC url used to open the connection
     * @return {@code true} if the statement executed normally (even when 0 rows were
     *         deleted), {@code false} on any error
     * @author wangyongtao
     */
    public boolean deleteCkTableDefault(String sql, String ckUrl) {
        logger.info("执行clickhouse的delete语句：[{}]", sql);
        try {
            Class.forName("com.github.housepower.jdbc.ClickHouseDriver");
        } catch (Exception e) {
            logger.error("com.github.housepower.jdbc.ClickHouseDriver 驱动加载异常：", e);
            return false;
        }
        try (Connection connection = DriverManager.getConnection(ckUrl);
             Statement statement = connection.createStatement()) {
            //如果删除行数为0，只要执行正常就会有返回
            return statement.execute(sql);
        } catch (Exception e) {
            logger.error("clickhouse删除操作异常，异常为：", e);
            return false;
        }
    }

    /**
     * 4. Batch-insert a Spark Dataset into ClickHouse using the native JDBC driver,
     * one connection per partition.
     *
     * @param dataset        the dataset to insert
     * @param numPartitions  number of partitions to coalesce to (one CK connection each)
     * @param batchSize      rows per JDBC batch before an executeBatch() flush
     * @param ckUrl          ClickHouse url, must include the database
     * @param username       ClickHouse user
     * @param password       ClickHouse password
     * @param tcpKeepAlive   whether to enable TCP keep-alive on the connection
     * @param queryTimeout   query timeout
     * @param connectTimeout connect timeout
     * @param dbName         target database name
     * @param tableName      target table name
     * @author wangyongtao
     * @date 2021/7/22 20:51
     */
    public static void batchInsertCKFromDataSet(Dataset<Row> dataset, int numPartitions, int batchSize,
                                                String ckUrl, String username, String password, boolean tcpKeepAlive,
                                                Duration queryTimeout, Duration connectTimeout, String dbName, String tableName) {
        //1、Build "INSERT INTO db.table VALUES (?,?,...)" with one placeholder per schema field.
        StructField[] fields = dataset.schema().fields();
        StringBuilder placeholders = new StringBuilder();
        int fieldLen = fields.length;
        for (int i = 0; i < fieldLen; i++) {
            placeholders.append(" ? ").append(",");
        }
        placeholders.deleteCharAt(placeholders.length() - 1);
        String sqlText = "INSERT INTO " + dbName + "." + tableName + " VALUES (" + placeholders + ")";

        //2、Process the dataset partition by partition.
        dataset.coalesce(numPartitions).foreachPartition(new ForeachPartitionFunction<Row>() {
            @Override
            public void call(Iterator<Row> t) throws Exception {

                //3、Obtain the ClickHouse connection; getCkConnection returns null on failure,
                // which previously caused an NPE inside the try-with-resources header.
                logger.info("分区处理dataset");
                Connection ckConn = getCkConnection(ckUrl, username, password, tcpKeepAlive, queryTimeout, connectTimeout);
                if (ckConn == null) {
                    logger.error("获取clickhouse连接失败，当前分区数据跳过插入");
                    return;
                }
                try (Connection conn = ckConn;
                     PreparedStatement statm = conn.prepareStatement(sqlText)) {

                    //4、Atomic counter so the lambda below can track pending rows; once it
                    // reaches batchSize the batch is flushed.
                    AtomicInteger count = new AtomicInteger(0);

                    //5、Iterate all rows of this partition.
                    t.forEachRemaining(line -> {
                        try {

                            //6、Bind every field of the row; booleans are stored as 1/0 since
                            // ClickHouse has no native boolean type here.
                            StructField[] lineFields = line.schema().fields();
                            int lineFieldsLen = lineFields.length;
                            for (int i = 0; i < lineFieldsLen; i++) {
                                if (lineFields[i].dataType().sameType(DataTypes.BooleanType)) {
                                    if (line.getBoolean(i)) {
                                        statm.setInt(i + 1, 1);
                                    } else {
                                        statm.setInt(i + 1, 0);
                                    }
                                } else {
                                    statm.setObject(i + 1, line.get(i));
                                }
                            }

                            //7、Queue the bound row; flush once the pending count reaches the
                            // threshold (the sleep throttles write pressure on ClickHouse).
                            statm.addBatch();
                            count.incrementAndGet();
                            if (count.get() >= batchSize) {
                                statm.executeBatch();
                                Thread.sleep(1000);
                                count.set(0);
                            }
                        } catch (Exception e) {
                            logger.error("一行数据处理异常，异常信息：", e);
                        }
                    });

                    //8、Flush any remaining rows that did not reach the threshold.
                    if (count.get() > 0) {
                        statm.executeBatch();
                        Thread.sleep(1000);
                    }
                    conn.commit();
                } catch (Exception e) {
                    logger.error("spark处理当前分区数据插入clickhouse异常：", e);
                }
            }
        });
    }

    /**
     * 4b. Run a Spark SQL query and append its result into ClickHouse via Spark's JDBC writer.
     *
     * @param sql          the Spark SQL query to execute
     * @param sparkSession active Spark session
     * @param ckTableName  target ClickHouse table, preferably in db.table form; if ckUrl
     *                     lacks the database and the table is not in the default db,
     *                     the db prefix here is mandatory
     * @param batchsize    Spark JDBC batch insert size
     * @param ckUrl        ClickHouse JDBC url (may or may not include the database)
     * @param ck           additional Spark-JDBC connection properties
     * @author wangyongtao
     * @date 2021/7/22 20:53
     */
    public void batchInsertCkBySparkJdbc(String sql, SparkSession sparkSession, String ckTableName, String batchsize, String ckUrl, Properties ck) {
        //日志记录sql等请求信息
        logger.info("spark执行的查询sql为[{}]", sql);
        // ClickHouse does not support transactions, hence isolation level NONE.
        sparkSession.sql(sql).write()
                .mode(SaveMode.Append)
                .option(JDBCOptions.JDBC_BATCH_INSERT_SIZE(), batchsize)
                .option(JDBCOptions.JDBC_TXN_ISOLATION_LEVEL(), "NONE")
                .option(JDBCOptions.JDBC_NUM_PARTITIONS(), "1")
                .option("isolationLevel", "NONE")
                .jdbc(ckUrl, ckTableName, ck);
    }

    /**
     * 5. Create a ClickHouse table whose DDL is rendered from the Freemarker template
     * {@code createCkTable.sql} on the classpath, then executed over plain JDBC.
     *
     * @param bean  template model describing the table to create
     * @param ckUrl ClickHouse JDBC url (connects as user "default" with no password)
     * @author wangyongtao
     */
    public void createTableInCk(FreemarkCreateTableBean bean, String ckUrl) {
        logger.info("开始创建clickhouse表[{}]", bean.getTableName());
        //1、Render the CREATE TABLE script from the Freemarker template.
        // Step 1: create a Configuration for the Freemarker version in use.
        Configuration configuration = new Configuration(new Version(2, 3, 31));
        // Step 2: the template directory is the classpath root.
        String rootPath = Thread.currentThread().getContextClassLoader().getResource("").getPath();
        String sql = null;
        try (Writer out = new StringWriter()) {
            configuration.setDirectoryForTemplateLoading(new File(rootPath));
            // Step 3: template charset, generally utf-8.
            configuration.setDefaultEncoding("utf-8");
            // Step 4: load the template.
            Template template = configuration.getTemplate("createCkTable.sql");
            // Step 5/6: render the template into the in-memory writer.
            template.process(bean, out);
            sql = ((StringWriter) out).getBuffer().toString();
            Class.forName("com.github.housepower.jdbc.ClickHouseDriver");
            logger.info("生成的clickhouse建表sql为：[{}]", sql);
        } catch (Exception e) {
            logger.error("生成clickhouse建表语句异常，待生成的语句模板是：[{}],异常信息是：", JSONObject.toJSONString(bean), e);
        }
        // Fix: if rendering failed, sql is still null and executing it would NPE — bail out.
        if (sql == null) {
            return;
        }
        try (
                Connection conn = DriverManager.getConnection(ckUrl, "default", null);
                Statement state = conn.createStatement()
        ) {
            state.execute(sql);
        } catch (Exception e) {
            logger.error("clickhouse建表语句执行异常，建表语句是：[{}],异常信息是：", sql, e);
        }
        logger.info("clickhouse建表结束，新建表为[{}]", bean.getTableName());
    }

    /**
     * 6. Create a ReplicatedMergeTree table on a ClickHouse cluster via JDBC.
     *
     * @param connection       ClickHouse connection
     * @param cluster          cluster name for ON CLUSTER
     * @param dbName           database name
     * @param tableName        table name
     * @param fields           column definitions (name + Spark type name)
     * @param partitionColumns partition-key columns (optional)
     * @param indexColumns     ORDER BY columns (optional)
     * @author wangyongtao
     * @TODO 待测试
     */
    public void createReplicatedMergeTreeTable(ClickHouseConnection connection, String cluster, String dbName, String tableName, StructField[] fields, List<String> partitionColumns, List<String> indexColumns) {
        //1、Build the PARTITION BY clause.
        // Fix: the original loop concatenated only commas and never the column names,
        // producing e.g. "PARTITION BY (,,)".
        String partitionStr = "";
        if (partitionColumns != null && !partitionColumns.isEmpty()) {
            partitionStr = "PARTITION BY (" + String.join(",", partitionColumns) + ")";
        }
        //2、Build the ORDER BY clause (same bug fixed as above).
        String orderbyStr = "";
        if (indexColumns != null && !indexColumns.isEmpty()) {
            orderbyStr = "ORDER BY (" + String.join(",", indexColumns) + ")";
        }
        //3、Build the CREATE TABLE statement with the column list.
        StringBuilder createSql = new StringBuilder();
        createSql.append("CREATE TABLE IF NOT EXISTS ")
                .append(dbName).append(".").append(tableName)
                .append(" ON CLUSTER ").append(cluster)
                .append("( ");
        for (StructField field : fields) {
            createSql.append(field.name()).append(" ").append(field.dataType().typeName()).append(",");
        }
        createSql.deleteCharAt(createSql.length() - 1);
        //4、Append the engine and clause parameters.
        createSql.append(")")
                .append("ENGINE = ReplicatedMergeTree(")
                //设置自定义副本引擎参数
                .append(")")
                .append(partitionStr)
                .append(orderbyStr);
        logger.info("待执行创建副本表sql为：[{}]", createSql.toString());
        //5、Execute the DDL.
        try (PreparedStatement stmt = connection.prepareStatement(createSql.toString())) {
            stmt.executeUpdate();
        } catch (Exception e) {
            logger.error("创建副本表异常，异常信息为：", e);
        }
    }

    /**
     * 7. Create a Distributed table ("<tableName>_distributed") over an existing local
     * table on a ClickHouse cluster, sharded by rand().
     *
     * @param connection ClickHouse connection
     * @param cluster    cluster name
     * @param dbName     database name
     * @param tableName  name of the underlying local table
     * @author wangyongtao
     * @TODO 待测试
     */
    public void createDistributedTable(ClickHouseConnection connection, String cluster, String dbName, String tableName) {
        String tabelDistributed = tableName + "_distributed";
        // Fix: the original ignored tabelDistributed and created "tableName AS tableName",
        // a self-referencing no-op; the distributed table must get the "_distributed" name.
        StringBuilder createSql = new StringBuilder();
        createSql.append("CREATE TABLE IF NOT EXISTS ").append(dbName).append(".").append(tabelDistributed).append(" ON CLUSTER ").append(cluster)
                .append(" AS ").append(dbName).append(".").append(tableName)
                .append(" ENGINE =  Distributed(").append(cluster).append(",").append(dbName).append(",").append(tableName).append(",").append("rand()").append(")");
        logger.info("创建分布式表语句为[{}]:", createSql.toString());
        try (PreparedStatement stmt = connection.prepareStatement(createSql.toString())) {
            stmt.executeUpdate();
        } catch (Exception e) {
            logger.error("创建分布式表异常，异常信息为：", e);
        }
    }

}
