package com.wyt.spark.clickhouse;

import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.execution.datasources.jdbc.JDBCOptions;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.List;
import java.util.Properties;

/**
 * @Description:
 * @Author wangyongtao
 * @Date 2021/10/9 15:00
 **/
public class ClickhouseUtils {

    private static final Logger logger = LoggerFactory.getLogger(ClickhouseUtils.class);

    // Tuning options passed to Spark's JDBC writer in insertBySpark.
    private static final String batchsize = "10000";
    private static final String isolationLevel = "NONE";
    private static final String numPartitions = "1";

    /** ClickHouse JDBC url captured by {@link #init}; reused by every helper. */
    public static String url;

    /** Spark session captured by {@link #init}; used by {@link #insertBySpark}. */
    public static SparkSession sparkSession;

    // Concurrent use of this single shared connection is NOT supported; if that
    // is ever needed, layer a connection pool on top of this utility class.
    public static Connection connection;

    public static Statement statement;

    /**
     * Opens the shared ClickHouse connection and statement.
     * Must be called once before any other method of this class.
     *
     * @param ckUrl          ClickHouse JDBC url, e.g. {@code jdbc:clickhouse://host:9000/db}
     * @param ckSparkSession Spark session used for bulk inserts
     * @return {@code true} when the connection was established, {@code false} on any error
     */
    public static boolean init(String ckUrl, SparkSession ckSparkSession) {
        url = ckUrl;
        sparkSession = ckSparkSession;
        try {
            // Explicit driver load kept for pre-JDBC4 classpath setups.
            Class.forName("com.github.housepower.jdbc.ClickHouseDriver");
            connection = DriverManager.getConnection(ckUrl);
            statement = connection.createStatement();
        } catch (Exception e) {
            logger.error("clickhouse jdbc初始化连接异常：", e);
            return false;
        }
        return true;
    }

    /**
     * Drops a table if it exists, optionally on a cluster and optionally synchronously.
     *
     * @param db          database name
     * @param table       table name
     * @param clusterName cluster to run on, or {@code null} for the local node
     * @param isSync      append {@code SYNC} so the drop waits for completion
     * @param selfProp    per-call connection properties, or {@code null}/empty to use
     *                    the shared connection (see {@link #execute})
     * @return {@code true} when the statement executed without error
     */
    public static boolean dropTable(String db, String table, String clusterName, boolean isSync, Properties selfProp) {
        StringBuilder sb = new StringBuilder("DROP TABLE IF EXISTS ").append(db).append(".").append(table);
        if (clusterName != null) {
            sb.append(" ON CLUSTER ").append(clusterName);
        }
        if (isSync) {
            sb.append(" sync ");
        }
        return execute(sb.toString(), selfProp);
    }

    /**
     * Executes a caller-supplied CREATE TABLE statement.
     *
     * @param sql      full DDL statement
     * @param selfProp per-call connection properties, or {@code null}/empty for the shared connection
     * @return {@code true} when the statement executed without error
     */
    public static boolean createTable(String sql, Properties selfProp) {
        return execute(sql, selfProp);
    }

    /**
     * Drops the given partitions one by one.
     * <p>
     * Fixed: the previous implementation returned only the LAST partition's flag,
     * so an earlier failure was silently ignored (and an empty list returned
     * {@code false} even though nothing failed). All partitions are still
     * attempted; the result now reflects whether every drop succeeded.
     *
     * @return {@code true} only when every partition was dropped successfully
     *         (trivially {@code true} for an empty list)
     */
    public static boolean dropTablePartitions(String db, String table, List<String> partitions, String clusterName, Properties selfProp) {
        String sqlBase;
        if (clusterName != null) {
            sqlBase = "ALTER TABLE " + db + "." + table + " ON CLUSTER " + clusterName + " DROP PARTITION ";
        } else {
            sqlBase = "ALTER TABLE " + db + "." + table + " DROP PARTITION ";
        }
        boolean allDropped = true;
        for (String partition : partitions) {
            boolean dropFlag = execute(sqlBase + " '" + partition + "'", selfProp);
            logger.info("分区[{}]删除标识为[{}]", partition, dropFlag);
            allDropped = allDropped && dropFlag;
        }
        return allDropped;
    }

    /**
     * Truncates a table if it exists, optionally on a cluster and optionally synchronously.
     *
     * @return {@code true} when the statement executed without error
     */
    public static boolean truncateTable(String db, String table, String clusterName, boolean isSync, Properties selfProp) {
        StringBuilder sb = new StringBuilder("TRUNCATE TABLE IF EXISTS ").append(db).append(".").append(table);
        if (clusterName != null) {
            sb.append(" ON CLUSTER ").append(clusterName);
        }
        if (isSync) {
            sb.append(" sync ");
        }
        return execute(sb.toString(), selfProp);
    }

    /**
     * Renames {@code db.oldTable} to {@code db.newTable}, optionally on a cluster.
     * Always runs on the shared connection (no per-call properties).
     *
     * @return {@code true} when the statement executed without error
     */
    public static boolean renameTable(String db, String oldTable, String newTable, String clusterName) {
        // Fixed log format: was "rename{} {} to {} " which produced garbled output.
        logger.info("rename {}.{} to {}.{}", db, oldTable, db, newTable);
        StringBuilder sb = new StringBuilder("RENAME TABLE ")
                .append(db).append(".").append(oldTable).append(" TO ")
                .append(db).append(".").append(newTable);
        if (clusterName != null) {
            sb.append(" ON CLUSTER ").append(clusterName);
        }
        return execute(sb.toString(), null);
    }

    /**
     * Moves the given partitions from the backup table {@code db.table + suffix}
     * back into {@code db.table}, stopping at the first failure.
     *
     * @param clusterName currently unused — MOVE PARTITION is issued without
     *                    ON CLUSTER; kept for signature compatibility
     * @return {@code true} only when every partition was moved successfully
     */
    public static boolean movePartition(String db, String table, String suffix, String clusterName, List<String> partitions) {
        String dbTableName = db + "." + table;
        String backDbTableName = dbTableName + suffix;
        for (String partition : partitions) {
            String sql = "ALTER TABLE " + backDbTableName + " MOVE PARTITION " + " '" + partition + "'" + " TO TABLE " + dbTableName;
            boolean movePartitionFlag = execute(sql, null);
            logger.info("分区[{}]迁移标识为[{}]", partition, movePartitionFlag);
            if (!movePartitionFlag) {
                return false;
            }
        }
        return true;
    }

    /**
     * Runs {@code sql} through the shared Spark session and appends the result
     * set into the ClickHouse table {@code db.table} via Spark's JDBC writer.
     *
     * @param sql   Spark SQL used to extract the data (typically from Hive)
     * @param props JDBC connection properties for the writer
     * @return {@code true} when the write completed without error
     */
    public static boolean insertBySpark(String sql, String db, String table, Properties props) {
        String dbTableName = db + "." + table;
        logger.info("执行clickhouse插入进程，从hive中提取数据的sql为[{}],待插入的ck表为[{}]", sql, dbTableName);
        try {
            sparkSession.sql(sql).repartition(Integer.parseInt(numPartitions)).write()
                    .mode(SaveMode.Append)
                    .option(JDBCOptions.JDBC_BATCH_INSERT_SIZE(), batchsize)
                    .option(JDBCOptions.JDBC_TXN_ISOLATION_LEVEL(), isolationLevel)
                    .option(JDBCOptions.JDBC_NUM_PARTITIONS(), numPartitions)
                    .jdbc(url, dbTableName, props);
        } catch (Exception e) {
            logger.error("clickhouse批量插入异常，异常原因为：", e);
            return false;
        }
        return true;
    }

    /**
     * Closes the shared statement and connection, swallowing (but logging) errors.
     * Fixed ordering: the statement is now closed BEFORE the connection — the
     * original closed the connection first, which can make the statement close
     * fail on some drivers.
     */
    public static void close() {
        if (statement != null) {
            try {
                statement.close();
            } catch (Exception e) {
                logger.error("clickhouse statement 关闭异常：", e);
            }
        }
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception e) {
                logger.error("clickhouse 连接关闭异常：", e);
            }
        }
    }

    /**
     * Executes a single statement. When {@code selfProp} is non-null and
     * non-empty a dedicated short-lived connection is opened with those
     * properties; otherwise the shared {@link #statement} from {@link #init}
     * is used (so {@link #init} must have been called first).
     *
     * @return {@code true} when the statement executed without error
     */
    private static boolean execute(String sql, Properties selfProp) {
        logger.info("执行 clickhouse 语句[{}]：", sql);
        if (selfProp != null && !selfProp.isEmpty()) {
            try (Connection selfConn = DriverManager.getConnection(url, selfProp);
                 Statement selfState = selfConn.createStatement()) {
                selfState.execute(sql);
            } catch (Exception e) {
                logger.error("clickhouse sql异常，异常为：", e);
                return false;
            }
        } else {
            try {
                statement.execute(sql);
            } catch (Exception e) {
                logger.error("clickhouse sql异常，异常为：", e);
                return false;
            }
        }
        return true;
    }

    /** Ad-hoc manual smoke test against a fixed ClickHouse instance. */
    public static void main(String[] args) {

        String url = "jdbc:clickhouse://172.172.178.245:9000/gdm";

        // ResultSet added to try-with-resources so it is closed deterministically.
        try (Connection selfConn = DriverManager.getConnection(url);
             Statement selfState = selfConn.createStatement();
             ResultSet resultSet = selfState.executeQuery("select * from gdm.test_local_file2")) {
            while (resultSet.next()) {
                System.out.println(resultSet.getObject(1));
            }
        } catch (Exception e) {
            logger.error("clickhouse sql异常，异常为：", e);
        }

    }

}
