package com.powerdata.core.paimon;


import com.alibaba.fastjson2.JSONObject;
import com.powerdata.core.paimon.bean.table.*;
import com.powerdata.core.paimon.catalog.CatalogProperties;
import com.powerdata.core.paimon.catalog.PDHdfsAvroUtils;
import com.powerdata.core.paimon.catalog.PDHdfsUtils;
import com.powerdata.core.paimon.engine.PDPaimonFlinkUtils;
import com.powerdata.core.paimon.enums.PaimonBaseType;
import org.apache.commons.collections4.MapUtils;
import org.apache.commons.lang3.ObjectUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.paimon.Snapshot;
import org.apache.paimon.catalog.*;
import org.apache.paimon.data.BinaryString;
import org.apache.paimon.data.GenericRow;
import org.apache.paimon.data.InternalRow;
import org.apache.paimon.flink.FlinkFileIO;
import org.apache.paimon.fs.hadoop.HadoopFileIO;
import org.apache.paimon.hive.HiveCatalog;
import org.apache.paimon.jdbc.JdbcCatalog;
import org.apache.paimon.jdbc.JdbcCatalogFactory;
import org.apache.paimon.options.Options;
import org.apache.paimon.predicate.In;
import org.apache.paimon.predicate.PredicateBuilder;
import org.apache.paimon.reader.RecordReader;
import org.apache.paimon.schema.Schema;
import org.apache.paimon.schema.SchemaManager;
import org.apache.paimon.schema.TableSchema;
import org.apache.paimon.table.Table;
import org.apache.paimon.table.sink.*;
import org.apache.paimon.table.source.ReadBuilder;
import org.apache.paimon.table.source.Split;
import org.apache.paimon.table.source.TableRead;
import org.apache.paimon.types.*;
import org.apache.paimon.utils.SnapshotManager;
import org.eclipse.jetty.util.ajax.JSON;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.net.URI;
import java.nio.ByteBuffer;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import java.text.DecimalFormat;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.util.*;
import java.util.stream.Collectors;
import java.util.zip.GZIPInputStream;

import org.apache.paimon.fs.Path;

/**
 * https://www.jianshu.com/p/f8a518d9f6ff
 */
public class PDPaimonUtils {

    private static final Logger log = LoggerFactory.getLogger(PDPaimonUtils.class);

    //    private String uploadHiveFilePath = "/opt/paimonManager/hiveconf/";
    private Catalog catalog;
    private String type;
    private String warehouse;
    private String hiveUri;
    private String catalogId;

    private String execUser;

    /**
     * Creates a catalog handle of the requested type ("jdbc", "hadoop", "hive",
     * anything else falls back to a local filesystem catalog) and records the
     * connection settings for later use by the instance methods.
     *
     * @param extraProperties reserved for future catalog options; currently unused
     * @throws Exception if the underlying catalog cannot be created
     */
    private PDPaimonUtils(String catalogId, String type, String hiveUrl, String warehousePath, String hadoopUser, String uploadHiveFilePath,
                          Map<String, Object> extraProperties) throws Exception {
        execUser = hadoopUser;
        // All HDFS access performed by this JVM runs as the configured user.
        System.setProperty("HADOOP_USER_NAME", hadoopUser);
        if ("jdbc".equals(type)) {
            Path dataPath = new Path(warehousePath);
            HadoopFileIO hadoopFileIO = new HadoopFileIO();
            JdbcCatalogFactory jdbcCatalogFactory = new JdbcCatalogFactory();
            CatalogContext catalogContext = CatalogContext.create(dataPath);
            // No cast needed: the factory result is assigned to the Catalog field.
            catalog = jdbcCatalogFactory.create(hadoopFileIO, dataPath, catalogContext);
        } else if ("hadoop".equals(type)) {
            catalog = createHadoopCatalog(warehousePath);
        } else if ("hive".equals(type)) {
            // Hive requires core-site.xml / hdfs-site.xml / hive-site.xml on the
            // classpath; see the catalog-management docs for this deployment.
            catalog = createHiveCatalog(warehousePath, hiveUrl);
        } else {
            // Local filesystem catalog.
            catalog = createFilesystemCatalog(warehousePath);
        }
        this.warehouse = warehousePath;
        this.type = type;
        this.hiveUri = hiveUrl;
        this.catalogId = catalogId;
    }

    public static HashMap<String, PDPaimonUtils> hadoopCatalogHashMap = new HashMap<>();

    /**
     * Returns the cached {@link PDPaimonUtils} for {@code catalogId}, creating it on
     * first use. Delegates to the overload that accepts extra properties, passing an
     * empty map, instead of duplicating the cache logic.
     */
    public static PDPaimonUtils build(String catalogId, String type, String hiveUrl, String warehousePath, String hadoopUser, String uploadHiveFilePath) throws Exception {
        return build(catalogId, type, hiveUrl, warehousePath, hadoopUser, uploadHiveFilePath, new HashMap<>());
    }

    /**
     * Returns the cached {@link PDPaimonUtils} for {@code catalogId}, creating and
     * caching a new instance on first use. Synchronized to close the check-then-put
     * race on the shared, unsynchronized cache map.
     *
     * @param extraProperties reserved for future catalog options (currently unused)
     */
    public static synchronized PDPaimonUtils build(String catalogId, String type, String hiveUrl, String warehousePath, String hadoopUser, String uploadHiveFilePath,
                                      Map<String, Object> extraProperties) throws Exception {
        PDPaimonUtils pdPaimonUtils = hadoopCatalogHashMap.get(catalogId);
        if (pdPaimonUtils == null) {
            pdPaimonUtils = new PDPaimonUtils(catalogId, type, hiveUrl, warehousePath, hadoopUser, uploadHiveFilePath, extraProperties);
            hadoopCatalogHashMap.put(catalogId, pdPaimonUtils);
        }
        return pdPaimonUtils;
    }

    /** Builds a plain filesystem-backed Paimon catalog rooted at {@code warehouse}. */
    public static Catalog createFilesystemCatalog(String warehouse) {
        Path warehousePath = new Path(warehouse);
        return CatalogFactory.createCatalog(CatalogContext.create(warehousePath));
    }

    /**
     * Builds a catalog backed by a Hadoop file system warehouse.
     * NOTE(review): {@code addResource(warehouse)} treats the warehouse path as a
     * classpath config-resource name — confirm this is intentional.
     */
    public static Catalog createHadoopCatalog(String warehouse) {
        Options catalogOptions = new Options();
        catalogOptions.set("warehouse", warehouse);

        Configuration hadoopConf = new Configuration();
        hadoopConf.addResource(warehouse);

        return CatalogFactory.createCatalog(CatalogContext.create(catalogOptions, hadoopConf));
    }

    /**
     * Builds a Hive-metastore-backed Paimon catalog.
     * Requires Hive jars (or the Hive bundled jar) on the classpath.
     *
     * @param hdfsUrl warehouse root on HDFS
     * @param hiveUri thrift URI of the Hive metastore
     */
    public static Catalog createHiveCatalog(String hdfsUrl, String hiveUri) {
        Options hiveOptions = new Options();
        hiveOptions.set("warehouse", hdfsUrl);
        hiveOptions.set("metastore", "hive");
        hiveOptions.set("uri", hiveUri);
        // "hive-conf-dir" / "hadoop-conf-dir" may be set here when config files live outside the classpath.
        return CatalogFactory.createCatalog(CatalogContext.create(hiveOptions));
    }

    /**
     * Lists database names visible through this catalog.
     * For hive catalogs the result is intersected with the databases actually present
     * in the warehouse directory on HDFS; every other catalog type ("jdbc", "hadoop",
     * "file", …) previously had identical branches, collapsed here into one path.
     */
    public List<String> listDataBases() {
        List<String> namespaces = catalog.listDatabases();
        if ("hive".equals(type)) {
            List<String> hiveDB = getHiveDB(warehouse);
            return namespaces.stream().filter(hiveDB::contains).collect(Collectors.toList());
        }
        return new ArrayList<>(namespaces);
    }

    /**
     * Creates a database, ignoring the call if it already exists.
     * Failures are logged, never thrown.
     */
    public void addDatabases(String dataBaseName) {
        try {
            // createDatabase is declared on the Catalog interface, so one call covers every
            // catalog flavour. The previous hive branch cast to JdbcCatalog, which threw
            // ClassCastException for genuine HiveCatalog instances.
            catalog.createDatabase(dataBaseName, true, new HashMap<>());
        } catch (Exception e) {
            log.error("Failed to create database {}", dataBaseName, e);
        }
    }

    /**
     * Drops a database, cascading into its tables; missing databases are ignored.
     * Failures are logged, never thrown.
     */
    public void deleteDatabases(String dataBaseName) {
        try {
            // dropDatabase is declared on the Catalog interface; per-type casts were redundant.
            catalog.dropDatabase(dataBaseName, true, true);
        } catch (Exception e) {
            // Previously swallowed silently; at least record the failure.
            log.error("Failed to drop database {}", dataBaseName, e);
        }
    }

    /**
     * Lists the table names of a database; returns an empty list (and logs) on failure.
     */
    public List<String> listTable(String databaseName) {
        try {
            // listTables is declared on the Catalog interface; per-type casts were redundant.
            return new ArrayList<>(catalog.listTables(databaseName));
        } catch (Exception e) {
            log.error("Failed to list tables of database {}", databaseName, e);
            return new ArrayList<>();
        }
    }

    /**
     * Computes storage metrics for a table — total size, file count, average file size —
     * by walking its data directory, plus the last commit time taken from the newest
     * snapshot. Sizes are reported divided by 1000 (KB-ish), matching formatDouble usage.
     *
     * @return an empty bean when the table has no snapshots yet
     * @throws IOException on file-system access failure
     */
    public TableMetricsBean getTableMetrics(String databaseName, String tableName) throws IOException {
        List<TableTransactionsBean> transactionsMessage = getTransactionsMessage(databaseName, tableName);
        // isEmpty already covers both null and size()==0.
        if (ObjectUtils.isEmpty(transactionsMessage)) {
            return new TableMetricsBean();
        }
        Table table = getTable(Identifier.create(databaseName, tableName));
        String dataPath = table.options().get("path");
        // NOTE: FileSystem.get returns a JVM-cached, shared instance; do not close it here.
        FileSystem fileSystem = FileSystem.get(URI.create(dataPath), new Configuration());

        // Single-entry maps act as mutable accumulators for getMetrics.
        HashMap<String, Double> tableSizeMap = new HashMap<>();
        tableSizeMap.put("tableSize", 0.0);
        HashMap<String, Integer> filesMap = new HashMap<>();
        filesMap.put("files", 0);

        if (!fileSystem.exists(new org.apache.hadoop.fs.Path(dataPath))) {
            fileSystem.mkdirs(new org.apache.hadoop.fs.Path(dataPath));
        }

        getMetrics(dataPath, fileSystem, tableSizeMap, filesMap);

        Double tableSize = tableSizeMap.get("tableSize");
        Integer files = filesMap.get("files");
        // Guard against division by zero for empty tables.
        double avgFileSize = tableSize / (files == 0 ? 1 : files);
        // Transactions are sorted newest-first, so index 0 is the latest commit.
        Long lastCommitTime = transactionsMessage.get(0).getCommitTime();

        // Record count is not tracked yet (snapshot summaries are stubbed); report 0.
        Long records = 0L;
        return new TableMetricsBean(formatDouble(tableSize / 1000), files, formatDouble(avgFileSize / 1000),
                lastCommitTime, records);
    }

    /**
     * Loads a table by identifier; returns {@code null} (and logs) when the lookup fails.
     */
    public Table getTable(Identifier of) {
        try {
            // getTable is declared on the Catalog interface; per-type casts were redundant.
            return catalog.getTable(of);
        } catch (Exception e) {
            log.error("Failed to load table {}", of, e);
            return null;
        }
    }

    /**
     * Returns whether the table exists in the catalog.
     * tableExists is declared on the Catalog interface, so the previous per-type
     * branches and casts were redundant.
     */
    public Boolean tableExists(String databaseName, String tableName) {
        return catalog.tableExists(Identifier.create(databaseName, tableName));
    }

    /**
     * Reads the latest persisted schema of a table and maps each field to a
     * TableColumnsBean, flagging primary-key and partition-key columns.
     * Returns an empty list when no schema has been written yet.
     */
    public List<TableColumnsBean> getColumnsOfTable(String databaseName, String tableName) {
        Table table = getTable(Identifier.create(databaseName, tableName));
        Path dataPath = new Path(table.options().get("path"));
        FlinkFileIO fileIO = new FlinkFileIO(dataPath);

        SchemaManager schemaManager = new SchemaManager(fileIO, dataPath);
        Optional<TableSchema> tableSchemaOptional = schemaManager.latest();
        if (!tableSchemaOptional.isPresent()) {
            return new ArrayList<>();
        }
        TableSchema tableSchema = tableSchemaOptional.get();
        List<String> partitionKeys = tableSchema.partitionKeys();
        List<String> primaryKeys = tableSchema.primaryKeys();

        // Leftover System.out.println debug output removed.
        return tableSchema.fields().stream().map(field -> {
            // Column names are stored in an escaped/internal form; map back to the user-facing name.
            String realName = PDPaimonColumnUtils.realColumnName(field.name());
            boolean isPrimaryKey = primaryKeys.contains(realName);
            boolean isPartitionKey = partitionKeys.contains(realName);
            String realDataType = PDPaimonColumnUtils.transform(field.type().toString());
            return new TableColumnsBean(field.id(), realName, realDataType, field.description(),
                    field.type().isNullable(), isPrimaryKey, isPartitionKey);
        }).sorted(Comparator.comparingInt(TableColumnsBean::getId)).collect(Collectors.toList());
    }


    /**
     * Returns the table's partition-key columns (id, name, type), ordered by field id.
     * An empty list is returned when no schema has been persisted yet.
     */
    public List<TablePartitionKeysBean> getPartitionMessage(String databaseName, String tableName) {
        Table table = getTable(Identifier.create(databaseName, tableName));
        Path tableLocation = new Path(table.options().get("path"));
        SchemaManager manager = new SchemaManager(new FlinkFileIO(tableLocation), tableLocation);

        Optional<TableSchema> latestSchema = manager.latest();
        if (!latestSchema.isPresent()) {
            return new ArrayList<>();
        }
        TableSchema schema = latestSchema.get();

        // Index fields by name so each partition key resolves to its full field metadata.
        Map<String, DataField> byName = new HashMap<>();
        for (DataField field : schema.fields()) {
            byName.put(field.name(), field);
        }

        List<TablePartitionKeysBean> keys = new ArrayList<>();
        for (String keyName : schema.partitionKeys()) {
            DataField field = byName.get(keyName);
            keys.add(new TablePartitionKeysBean(field.id(), field.name(), field.name(), field.type().toString()));
        }
        keys.sort(Comparator.comparingInt(TablePartitionKeysBean::getId));
        return keys;
    }

    /**
     * Extracts a handful of well-known table options (bucket, bucket-key, path,
     * merge-engine) into a TableOptionBean.
     */
    public TableOptionBean getOptionsMessage(String databaseName, String tableName) {
        Table table = getTable(Identifier.create(databaseName, tableName));
        Map<String, String> options = table.options();

        // Read the option map directly; the previous fastjson round-trip added nothing.
        TableOptionBean tableOptionBean = new TableOptionBean();
        tableOptionBean.setBucket(options.get("bucket"));
        tableOptionBean.setBucketKey(options.get("bucket-key"));
        tableOptionBean.setPath(options.get("path"));
        tableOptionBean.setMergeEngine(options.get("merge-engine"));
        return tableOptionBean;
    }

    /**
     * Lists all snapshots of a table as transaction beans, ordered newest commit
     * first, with the currently active snapshot flagged.
     * NOTE(review): the {@code summary} map is a stubbed empty map, so file counts,
     * sizes and record totals always resolve to 0 until snapshot summaries are wired in.
     */
    public List<TableTransactionsBean> getTransactionsMessage(String databaseName, String tableName) {
        ArrayList<TableTransactionsBean> resultList = new ArrayList<>();
        try {
            Table table = getTable(Identifier.create(databaseName, tableName));
            Path dataPath = new Path(table.options().get("path"));
            FlinkFileIO fileIO = new FlinkFileIO(dataPath);

            SnapshotManager snapshotManager = new SnapshotManager(fileIO, dataPath);
            List<Snapshot> snapshots = snapshotManager.safelyGetAllSnapshots();

            Snapshot latestSnapshot = snapshotManager.latestSnapshot();
            if (ObjectUtils.isEmpty(latestSnapshot)) {
                // No commits yet -> no transactions to report.
                return resultList;
            }
            long currentSnapShotId = latestSnapshot.id();
            for (Snapshot snapshot : snapshots) {
                long transactionId = snapshot.id();
                String parentId = "";

                // Stub: snapshot summaries are not read yet; every metric below is 0.
                Map<String, String> summary = new HashMap<>();
                String operation = snapshot.statistics();
                // Deprecated new Integer/Double/Long(String) constructors replaced with parse methods.
                int addFiles = ObjectUtils.isEmpty(summary.get("added-data-files")) ? 0
                        : Integer.parseInt(summary.get("added-data-files"));
                int deleteFiles = ObjectUtils.isEmpty(summary.get("deleted-data-files")) ? 0
                        : Integer.parseInt(summary.get("deleted-data-files"));
                double addFileSize = ObjectUtils.isEmpty(summary.get("added-files-size")) ? 0
                        : Double.parseDouble(summary.get("added-files-size"));
                double deleteFileSize = ObjectUtils.isEmpty(summary.get("removed-files-size")) ? 0
                        : Double.parseDouble(summary.get("removed-files-size"));
                long commitTime = snapshot.timeMillis();
                long records = ObjectUtils.isEmpty(summary.get("total-records")) ? 0
                        : Long.parseLong(summary.get("total-records"));
                TableTransactionsBean bean =
                        new TableTransactionsBean(transactionId, operation, addFiles, deleteFiles,
                                formatDouble(addFileSize / 1024), formatDouble(deleteFileSize / 1024),
                                commitTime, transactionId, parentId);

                // The bean's snapshot id is this snapshot's own id; compare primitives
                // instead of the old boxed new Long(...) == long comparison.
                if (transactionId == currentSnapShotId) {
                    bean.setCurrent(true);
                }
                bean.setRecords(records);
                resultList.add(bean);
            }
            // Newest commit first.
            resultList.sort((a, b) -> b.getCommitTime().compareTo(a.getCommitTime()));
        } catch (Exception e) {
            log.error("Failed to read transactions of {}.{}", databaseName, tableName, e);
        }
        return resultList;
    }


    /**
     * Reads the total record count recorded for a snapshot.
     * NOTE(review): the summary map is a stubbed empty map (snapshot.summary() was
     * never wired in), so this currently always returns 0 for existing snapshots.
     *
     * @return 0 when the snapshot is missing or carries no "total-records" entry
     */
    private Long getRecordsBySnapShotId(Table table, Long snapShotId) {
        Path dataPath = new Path(table.options().get("path"));
        SnapshotManager snapshotManager = new SnapshotManager(new FlinkFileIO(dataPath), dataPath);

        Snapshot snapshot = snapshotManager.snapshot(snapShotId);
        if (ObjectUtils.isEmpty(snapshot)) {
            return 0L;
        }
        Map<String, String> summary = new HashMap<>(); // stub: snapshot.summary() not wired in yet
        String totalRecords = summary.get("total-records");
        // Long.parseLong replaces the deprecated new Long(String) constructor.
        return ObjectUtils.isEmpty(totalRecords) ? 0L : Long.parseLong(totalRecords);
    }

    /**
     * Creates a batch RecordReader over the table, limited to {@code limit} rows.
     * The unused PredicateBuilder (and the type/name arrays that only fed it) was
     * removed; the column list stays in the signature for call-site compatibility.
     *
     * @param columnsOfTable currently unused; kept for interface compatibility
     * @return the reader, or {@code null} when planning or reader creation fails
     */
    public RecordReader<InternalRow> batchReadTable(Table table, List<TableColumnsBean> columnsOfTable, Integer limit) {
        try {
            ReadBuilder readBuilder = table.newReadBuilder().withLimit(limit);

            // Plan splits centrally, then read all of them in this single local task.
            List<Split> splits = readBuilder.newScan().plan().splits();
            return readBuilder.newRead().createReader(splits);
        } catch (Exception e) {
            // Previously swallowed silently.
            log.error("Failed to create batch reader", e);
            return null;
        }
    }

    // Full-table read with in-memory paging; not recommended for direct API use —
    // prefer true pagination for large tables.
    public Map<String, Object> getTableData(String databaseName, String tableName, Integer pageSize, Integer pageNum) throws Exception {
        Table table = getTable(Identifier.create(databaseName, tableName));

        Path tableLocation = new Path(table.options().get("path"));
        SnapshotManager snapshotManager = new SnapshotManager(new FlinkFileIO(tableLocation), tableLocation);
        Snapshot latest = snapshotManager.latestSnapshot();

        List<TableColumnsBean> columns = getColumnsOfTable(databaseName, tableName);

        // Read enough rows to cover every page up to and including the requested one.
        int rowCap = pageSize * pageNum;
        RecordReader<InternalRow> reader = batchReadTable(table, columns, rowCap);

        Map<String, Object> result = getDataMapByPages(pageSize, pageNum, columns, reader);
        if (ObjectUtils.isEmpty(latest)) {
            result.put("total", 0L);
        } else {
            result.put("total", getRecordsBySnapShotId(table, latest.id()));
        }
        return result;
    }

    // Returns the table's data as of the given snapshot id (time-travel read).
    // NOTE(review): unimplemented stub — the snapshot read below is commented out and the
    // method currently returns an empty map (no rows, no "total" entry).
    public Map<String, Object> getTableData(String databaseName, String tableName, Long snapshotId,
                                            Integer pageSize, Integer pageNum) throws Exception {
        Table table = getTable(Identifier.create(databaseName, tableName));
        List<String> colNames = getColumnNameList(table);

//        String path = table.options().get("path");
//        Path dataPath = new Path(path);
//        FlinkFileIO fileIO = new FlinkFileIO(dataPath);
//
//        SnapshotManager snapshotManager = new SnapshotManager(fileIO, dataPath);
//        Snapshot snapshot = snapshotManager.snapshot(snapshotId);

//        CloseableIterable<Record> records = PaimonGenerics.read(table).useSnapshot(snapshotId).build();
//        Map<String, Object> result = getDataMapByPages(pageSize, pageNum, colNames, records);
//        result.put("total", getRecordsBySnapShotId(table, snapshotId));

        Map<String, Object> result = new HashMap<>();

        return result;
    }

    // Returns the table's data state as of a point in time (commit-time travel).
    // NOTE(review): unimplemented stub — always returns an empty map. The intended
    // implementation (pick the newest snapshot with commitTime <= asOfTime) is sketched below.
    public Map<String, Object> getTableDataBeforeTime(String databaseName, String tableName, Long asOfTime,
                                                      Integer pageSize, Integer pageNum) throws Exception {
//        Table table = getTable(Identifier.create(databaseName, tableName));
//        List<String> colNames = getColumnNameList(table);
//        String snapshotId = getTransactionsMessage(databaseName, tableName)
//                .stream()
//                .filter(a -> a.getCommitTime() <= asOfTime)
//                .max(Comparator.comparing(TableTransactionsBean::getCommitTime))
//                .get()
//                .getSnapshotId();
//        CloseableIterable<Record> records = PaimonGenerics.read(table).useSnapshot(new Long(snapshotId)).build();
//        Map<String, Object> result = getDataMapByPages(pageSize, pageNum, colNames, records);
//        result.put("total", getRecordsBySnapShotId(table, new Long(snapshotId)));

        Map<String, Object> result = new HashMap<>();

        return result;
    }

    // Returns rows appended between two snapshots (no total count available).
    // NOTE(review): unimplemented stub — always returns an empty map; intended logic is sketched below.
    public Map<String, Object> getAppendsTableData(String databaseName, String tableName,
                                                   Long fromSnapshotId, Long toSnapshotId,
                                                   Integer pageSize, Integer pageNum) throws Exception {
//        Table table = getTable(Identifier.create(databaseName, tableName));
//        List<String> colNames = getColumnNameList(table);
//        CloseableIterable<Record> records =
//                PaimonGenerics.read(table).appendsBetween(fromSnapshotId, toSnapshotId).build();
//        return getDataMapByPages(pageSize, pageNum, colNames, records);

        Map<String, Object> result = new HashMap<>();
        return result;
    }

    // Returns rows appended after the given snapshot.
    // NOTE(review): unimplemented stub — always returns an empty map; intended logic is sketched below.
    public Map<String, Object> getAppendsTableData(String databaseName, String tableName, Long afterSnapshotId,
                                                   Integer pageSize, Integer pageNum) throws Exception {
//        Table table = getTable(Identifier.create(databaseName, tableName));
//        List<String> colNames = getColumnNameList(table);
//        CloseableIterable<Record> records = PaimonGenerics.read(table).appendsAfter(afterSnapshotId).build();
//        return getDataMapByPages(pageSize, pageNum, colNames, records);

        Map<String, Object> result = new HashMap<>();
        return result;

    }

    /**
     * Intended to append rows (each a {@code splitCode}-delimited string) to a table;
     * usable for bulk data import once implemented.
     * NOT IMPLEMENTED: the write path (BatchWriteBuilder → BatchTableWrite → commit)
     * was left as commented-out scaffolding; the old body also read the table schema
     * into a list it never used. The method now makes the no-op explicit by logging
     * instead of silently discarding the input.
     */
    public void addTableData(String databaseName, String tableName, List<String> addData, String splitCode) {
        // TODO implement with table.newBatchWriteBuilder(): split each row on splitCode,
        //      convert values through PaimonBaseType against the latest TableSchema,
        //      write GenericRow records, then commit via writeBuilder.newCommit().
        log.warn("addTableData is not implemented yet; {} row(s) for {}.{} were ignored",
                addData == null ? 0 : addData.size(), databaseName, tableName);
    }

    /**
     * Intended to append JSON-encoded rows to a table.
     * NOT IMPLEMENTED: the write path was never finished (an Iceberg-style parquet
     * writer sketch was left commented out); the old body also read the table schema
     * into a list it never used. The method now makes the no-op explicit by logging
     * instead of silently discarding the input.
     */
    public void addTableDataOfJson(String databaseName, String tableName, ArrayList<String> addData) {
        // TODO implement with table.newBatchWriteBuilder(): parse each JSON row against the
        //      latest TableSchema (SchemaManager.latest()), build GenericRow values, commit.
        log.warn("addTableDataOfJson is not implemented yet; {} row(s) for {}.{} were ignored",
                addData == null ? 0 : addData.size(), databaseName, tableName);
    }

    /**
     * Deletes a single file from HDFS. The incoming path is expected to look like
     * "hdfs://host:8020/some/file"; everything up to and including ":8020" becomes
     * fs.defaultFS and the remainder the file path.
     *
     * <p>Best-effort: failures are logged and swallowed (original contract).
     *
     * @param filePath full HDFS URI containing the ":8020" port marker
     */
    public void deleteHdfsFile(String filePath) {
        // Guard: without the marker the original threw ArrayIndexOutOfBoundsException.
        String[] split = filePath.split(":8020");
        if (split.length < 2) {
            log.warn("deleteHdfsFile: unexpected path (no ':8020' marker): {}", filePath);
            return;
        }
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", split[0] + ":8020");
        // try-with-resources replaces the manual finally block, whose
        // Objects.requireNonNull itself threw when FileSystem.get had failed.
        try (FileSystem fileSystem = FileSystem.get(configuration)) {
            org.apache.hadoop.fs.Path target = new org.apache.hadoop.fs.Path(split[1]);
            if (fileSystem.isFile(target)) {
                fileSystem.delete(target, true);
            }
        } catch (Exception e) {
            log.error("deleteHdfsFile failed for {}", filePath, e);
        }
    }

    /**
     * Lists the first dot-separated segment of every entry name directly under the
     * given HDFS directory (e.g. "mydb.db" -> "mydb").
     *
     * @param filePath full HDFS URI containing the ":8020" port marker
     * @return segment names; empty on any error (best-effort, original contract)
     */
    private List<String> getHiveDB(String filePath) {
        ArrayList<String> result = new ArrayList<>();
        String[] split = filePath.split(":8020");
        if (split.length < 2) {
            log.warn("getHiveDB: unexpected path (no ':8020' marker): {}", filePath);
            return result;
        }
        Configuration configuration = new Configuration();
        configuration.set("fs.defaultFS", split[0] + ":8020");
        // try-with-resources: the original leaked the FileSystem handle (sibling
        // deleteHdfsFile closes it; this method did not).
        try (FileSystem fileSystem = FileSystem.get(configuration)) {
            for (FileStatus fileStatus : fileSystem.listStatus(new org.apache.hadoop.fs.Path(split[1]))) {
                result.add(fileStatus.getPath().getName().split("\\.")[0]);
            }
        } catch (Exception e) {
            log.error("getHiveDB failed for {}", filePath, e);
        }
        return result;
    }

    /**
     * Converts a string value to a Java object matching the given lowercase type name.
     * Unknown types fall through to the raw string.
     *
     * @param types one of boolean/int/long/double/float/binary/timestamptz, else raw
     * @param value string form of the value; null or empty yields null
     * @return converted value, or null when the value is blank or cannot be parsed
     */
    public Object getDataByType(String types, String value) {
        if (value == null || value.isEmpty()) {
            return null;
        }
        try {
            switch (types) {
                case "boolean":
                    return Boolean.valueOf(value);
                case "int":
                    // valueOf replaces the deprecated new Integer(String) constructor
                    return Integer.valueOf(value);
                case "long":
                    return Long.valueOf(value);
                case "double":
                    return Double.valueOf(value);
                case "float":
                    return Float.valueOf(value);
                case "binary":
                    // NOTE(review): buffer is sized by char count and encoded with the
                    // platform charset — multibyte input overflows (caught, yields null);
                    // confirm callers only pass ASCII.
                    ByteBuffer byteBuffer = ByteBuffer.allocate(value.length());
                    byteBuffer.put(value.getBytes());
                    return byteBuffer;
                case "timestamptz":
                    return LocalDateTime.parse(value).atOffset(ZoneOffset.UTC);
                default:
                    return value;
            }
        } catch (Exception e) {
            // Deliberate best-effort: unparseable values become null.
            return null;
        }
    }

    // Roll the table back to the given snapshot id.
    // NOTE(review): the rollback call below is commented out, so this method is
    // currently a no-op that only resolves the table handle.
    public void rollbackSnapshot(String databaseName, String tableName, Long snapshotId) {
        Table table = getTable(Identifier.create(databaseName, tableName));
//        table.manageSnapshots().rollbackTo(snapshotId).commit();
    }

    // Set the table's current snapshot to the given snapshot id.
    // NOTE(review): the actual call below is commented out, so this method is
    // currently a no-op that only resolves the table handle.
    public void setCurrentSnapshot(String databaseName, String tableName, Long snapshotId) {
        Table table = getTable(Identifier.create(databaseName, tableName));
//        table.manageSnapshots().setCurrentSnapshot(snapshotId).commit();
    }

    // Cherry-pick the changes of the given snapshot onto the current state.
    // NOTE(review): the actual call below is commented out, so this method is
    // currently a no-op that only resolves the table handle.
    public void cherryPickSnapshot(String databaseName, String tableName, Long snapshotId) {
        Table table = getTable(Identifier.create(databaseName, tableName));
//        table.manageSnapshots().cherrypick(snapshotId).commit();
    }

    /**
     * Compacts small data files of a table: runs the rewrite_data_files procedure,
     * deletes pre-compaction data files no longer referenced, then expires old
     * snapshots and prunes stale metadata files.
     *
     * @throws Exception on SQL execution or HDFS errors
     */
    public void mergeSmallFile(String databaseName, String tableName) throws Exception {
        // Renamed from "sparkUtils": this builds Flink utils, not Spark.
        PDPaimonFlinkUtils flinkUtils = PDPaimonFlinkUtils.build(catalogId, type, hiveUri, warehouse, databaseName, execUser);
        Table table = getTable(Identifier.create(databaseName, tableName));

        String path = table.options().get("path");
        String dataPath = path + "/data/";
        String fileSql =
                "select file_path from `" + catalogId + "`.`" + databaseName + "`.`" + tableName + "`.files";
        String mergeFiles =
                "call `" + catalogId + "`.system.rewrite_data_files(table => '`" + databaseName + "`.`"
                        + tableName + "`',options => map('min-input-files','2'))";
        PDHdfsUtils hdfsUtils = PDHdfsUtils.build(catalogId, warehouse, execUser);

        // Data files present before compaction (paths relative to the NN port marker).
        List<String> oldFilePath = new ArrayList<>();
        hdfsUtils.getFiles(dataPath, oldFilePath);
        oldFilePath = oldFilePath.stream().map(files -> files.split("8020")[1]).collect(Collectors.toList());
        flinkUtils.executeSql(mergeFiles);
        // Data files still referenced after compaction.
        List<String> newFilePath = flinkUtils.executeSql(fileSql).stream()
                .map(file -> {
                    Object filePathValue = file.get("file_path");
                    if (ObjectUtils.isNotEmpty(filePathValue)) {
                        return filePathValue.toString().split("8020")[1];
                    }
                    return "";
                }).collect(Collectors.toList());
        // Whatever existed before but is unreferenced now is garbage.
        oldFilePath.removeAll(newFilePath);
        for (String file : oldFilePath) {
            hdfsUtils.deleteFile(file);
        }

        Path tableLocation = new Path(path);
        FlinkFileIO fileIO = new FlinkFileIO(tableLocation);
        SnapshotManager snapshotManager = new SnapshotManager(fileIO, tableLocation);
        Snapshot snapshot = snapshotManager.latestSnapshot();
        // latestSnapshot() returns null for a table with no snapshots; the original
        // dereferenced it unconditionally and could NPE.
        if (snapshot != null) {
            expireOldSnapshots(snapshot.timeMillis() + 1, table);
        }
        deleteOldVersionFile(hdfsUtils, table);
    }

    // Expire all snapshots older than the given epoch-millis timestamp, retaining the
    // last two. NOTE(review): the expiration call below is commented out, so this is
    // currently a no-op kept for interface stability.
    public void expireOldSnapshots(Long tsToExpireTime, Table table) throws Exception {
//        table.expireSnapshots().expireOlderThan(tsToExpireTime)
//                .cleanExpiredFiles(true).retainLast(2).commit();
    }

    /**
     * Removes superseded table metadata JSON files once the table is down to exactly
     * one snapshot. For "hadoop" catalogs the live version is read from
     * version-hint.text and every other *.json is deleted; otherwise all but the
     * lexicographically last *.json file are deleted.
     *
     * @return true when cleanup ran, false when more than one snapshot still exists
     * @throws Exception on HDFS access errors
     */
    public boolean deleteOldVersionFile(PDHdfsUtils hdfsUtils, Table table) throws Exception {
        String path = table.options().get("path");
        Path tableLocation = new Path(path);
        FlinkFileIO fileIO = new FlinkFileIO(tableLocation);

        SnapshotManager snapshotManager = new SnapshotManager(fileIO, tableLocation);
        List<Snapshot> snapshots = snapshotManager.safelyGetAllSnapshots();
        // size() replaces the original manual counting loop over the list.
        if (snapshots.size() != 1) {
            return false;
        }
        String metaPath = path + "/metadata/";
        FileSystem fileSystem = hdfsUtils.getFileSystem();
        if ("hadoop".equals(type)) {
            // Keep only the metadata file named in version-hint.text.
            String version = hdfsUtils.catFile(metaPath + "version-hint.text");
            List<String> deleteFiles = Arrays.stream(fileSystem.listStatus(new org.apache.hadoop.fs.Path(metaPath)))
                    .map(fileStatus -> fileStatus.getPath().getName())
                    .filter(fileName -> fileName.endsWith(".json") &&
                            !("v" + version.replaceAll("\n", "") + ".metadata.json").equals(fileName))
                    .map(fileName -> (metaPath + fileName).split("8020")[1])
                    .collect(Collectors.toList());
            for (String deleteFile : deleteFiles) {
                hdfsUtils.deleteFile(deleteFile);
            }
        } else {
            // Keep only the newest (last after natural-order sort) metadata file.
            List<String> metaFiles = Arrays.stream(fileSystem.listStatus(new org.apache.hadoop.fs.Path(metaPath)))
                    .map(fileStatus -> fileStatus.getPath().getName())
                    .filter(fileName -> fileName.endsWith(".json"))
                    .sorted(Comparator.naturalOrder())
                    .collect(Collectors.toList());
            for (int i = 0; i < metaFiles.size() - 1; i++) {
                hdfsUtils.deleteFile((metaPath + metaFiles.get(i)).split("8020")[1]);
            }
        }
        return true;
    }

    /**
     * Resolves the latest schema column list (name + type, ordered by field id) for the
     * given table. The row maps in {@code addData} are currently NOT written anywhere:
     * the former bulk parquet DataWriter implementation was removed, leaving this
     * method a no-op apart from schema resolution.
     */
    public void addTableDataWithoutPartition(String databaseName, String tableName, List<HashMap<String, String>> addData) {
        Table table = getTable(Identifier.create(databaseName, tableName));
        Path tableLocation = new Path(table.options().get("path"));
        FlinkFileIO io = new FlinkFileIO(tableLocation);

        List<String> columns = new ArrayList<>();
        Optional<TableSchema> latestSchema = new SchemaManager(io, tableLocation).latest();
        if (latestSchema.isPresent()) {
            latestSchema.get().fields().stream()
                    .sorted(Comparator.comparingInt(DataField::id))
                    .forEach(field -> columns.add(field.name() + "," + field.type()));
        }
        // NOTE(review): bulk parquet writing was commented out here; addData is
        // ignored until a Paimon-based batch writer is restored.
    }

    //    @Async
    /**
     * Batch-inserts rows through Paimon's BatchWriteBuilder in overwrite mode. Very
     * large inserts into unpartitioned tables are routed to
     * {@link #addTableDataWithoutPartition}. Values are converted per column type via
     * {@link PaimonBaseType}; unmapped types insert null.
     *
     * <p>Best-effort: exceptions are logged and swallowed (original contract).
     */
    public void addTableData(String databaseName, String tableName, List<HashMap<String, String>> addDataList) {
        Table table = getTable(Identifier.create(databaseName, tableName));
        if (getPartitionMessage(databaseName, tableName).size() == 0 && addDataList.size() > 100000) {
            addTableDataWithoutPartition(databaseName, tableName, addDataList);
            return;
        }
        try {
            // Resolve column metadata once; row fields are written in this order.
            List<TableColumnsBean> columnsOfTable = getColumnsOfTable(databaseName, tableName);
            int fieldSize = columnsOfTable.size();
            BatchWriteBuilder writeBuilder = table.newBatchWriteBuilder().withOverwrite();

            // try-with-resources: the original never closed write/commit (leak).
            try (BatchTableWrite write = writeBuilder.newWrite()) {
                for (HashMap<String, String> dataMap : addDataList) {
                    JSONObject jsonObject = new JSONObject(dataMap);
                    GenericRow record = new GenericRow(RowKind.INSERT, fieldSize);
                    int j = 0;
                    for (TableColumnsBean column : columnsOfTable) {
                        record.setField(j++, convertColumnValue(jsonObject, column));
                    }
                    write.write(record);
                }
                List<CommitMessage> messages = write.prepareCommit();
                try (BatchTableCommit commit = writeBuilder.newCommit()) {
                    commit.commit(messages);
                }
            }
        } catch (Exception e) {
            log.error("addTableData failed for {}.{}", databaseName, tableName, e);
        }
    }

    /** Converts one JSON field to the Paimon row value for the column's base type. */
    private Object convertColumnValue(JSONObject jsonObject, TableColumnsBean column) {
        String columnName = column.getColumnName();
        switch (PaimonBaseType.getType(column.getDataType())) {
            case STRING:
                return BinaryString.fromString(jsonObject.getString(columnName));
            case BOOLEAN:
                return jsonObject.getBooleanValue(columnName);
            case FLOAT:
                return jsonObject.getFloatValue(columnName);
            case DOUBLE:
                return jsonObject.getDoubleValue(columnName);
            case LONG:
                return jsonObject.getLongValue(columnName);
            case INTEGER:
                return jsonObject.getIntValue(columnName);
            case DATE:
                return jsonObject.getDate(columnName);
            default:
                return null;
        }
    }

    /**
     * Deletes the given rows from a table by issuing one Flink SQL DELETE per row map.
     * Because the generated WHERE clause matches every duplicate of a row, the
     * duplicate count is taken first and (count - 1) copies are re-inserted afterwards,
     * so exactly one occurrence is removed per requested map.
     *
     * @param databaseName database of the target table
     * @param tableName    target table
     * @param addDataList  rows to delete, as column-name -> string-value maps
     */
    public void deleteTableData(String databaseName, String tableName, List<HashMap<String, String>> addDataList) {
        Table table = getTable(Identifier.create(databaseName, tableName));
        LinkedHashMap<String, String> colMap = new LinkedHashMap<>();

        String path = table.options().get("path");
        Path dataPath = new Path(path);
        FlinkFileIO fileIO = new FlinkFileIO(dataPath);

        // Build an ordered column-name -> column-type map from the latest schema.
        SchemaManager schemaManager = new SchemaManager(fileIO, dataPath);
        Optional<TableSchema> tableSchemaOptional = schemaManager.latest();
        if (tableSchemaOptional.isPresent()) {
            TableSchema tableSchema = tableSchemaOptional.get();
            List<String> colNameAndTypes = tableSchema.fields()
                    .stream()
                    .sorted(Comparator.comparingInt(DataField::id))
                    .map(a -> a.name() + "|" + a.type().toString())
                    .collect(Collectors.toList());

            for (String colNameAndType : colNameAndTypes) {
                String[] split = colNameAndType.split("\\|");
                colMap.put(split[0], split[1]);
            }
        }

        PDPaimonFlinkUtils pdPaimonFlinkUtils =
                PDPaimonFlinkUtils.build(catalogId, type, hiveUri, warehouse, databaseName, execUser);
        String deleteTable = "`" + databaseName + "`.`" + tableName + "`";
        deleteTable = "`" + catalogId + "`." + deleteTable + "";

        for (HashMap<String, String> addDatum : addDataList) {
            // WHERE clause matching every column of the row.
            // NOTE(review): values are concatenated, not parameterized — a value
            // containing a quote breaks the statement; confirm inputs are trusted.
            String colValue = " 1=1 ";
            StringBuilder midColValue = new StringBuilder();
            for (Map.Entry<String, String> col : colMap.entrySet()) {
                midColValue.append(" and `").append(col.getKey()).append("`=").append(getDataByType(addDatum, col.getKey(), col.getValue()));
            }
            colValue = colValue + midColValue;
            // Count duplicates BEFORE deleting so the surplus can be restored below.
            int times = countSql(pdPaimonFlinkUtils, deleteTable, colValue);
            String addSql = "delete from " + deleteTable + " where " + colValue;
            log.info("deleteTableData:" + addSql);
            pdPaimonFlinkUtils.executeSql(addSql);
            if (times > 1) {
                // The DELETE removed all duplicates; put back all but one.
                insertSqls(databaseName, tableName, times - 1, addDatum);
            }
        }
    }

    /**
     * Returns the number of rows in {@code tableName} matching {@code whereSql}.
     *
     * @throws NumberFormatException if the engine returns a non-numeric count
     */
    private int countSql(PDPaimonFlinkUtils pdPaimonFlinkUtils, String tableName, String whereSql) {
        String countSql = "select count(*) as counts from " + tableName + " where " + whereSql;
        log.info("countSql:" + countSql);
        List<Map<String, Object>> maps = pdPaimonFlinkUtils.executeSql(countSql);
        // parseInt replaces the deprecated new Integer(String) boxing constructor.
        return Integer.parseInt(maps.get(0).get("counts").toString());
    }

    /**
     * Re-inserts the same row {@code times} times via {@link #addTableData}. Used by
     * deleteTableData to restore surplus duplicates after a bulk DELETE.
     */
    private void insertSqls(String databaseName,
                            String tableName, int times, HashMap<String, String> addDatum) {
        ArrayList<HashMap<String, String>> rows = new ArrayList<>();
        int remaining = times;
        while (remaining-- > 0) {
            rows.add(addDatum);
        }
        addTableData(databaseName, tableName, rows);
    }

    /**
     * Renders one column value as a SQL literal for the generated WHERE clause.
     * Numeric, map and list types are emitted verbatim; everything else is quoted.
     *
     * @return SQL literal text; "''" when the value is missing or blank
     */
    private String getDataByType(HashMap<String, String> addDatum, String colName, String type) {
        String value = addDatum.get(colName);
        if (StringUtils.isEmpty(value)) {
            return "''";
        }
        List<String> numberTypes = Arrays.asList("int", "float", "double", "long");
        String lowerType = type.toLowerCase();
        if (numberTypes.contains(lowerType)
                || lowerType.contains("map")
                || lowerType.contains("list")) {
            return value;
        }
        // Escape embedded quotes so a value containing ' no longer breaks the SQL.
        return "'" + value.replace("'", "''") + "'";
    }

    /**
     * Reads one page of rows from the reader into a list of column-name -> value maps.
     *
     * <p>Fixes the original pagination bug: the row counter was copied into an
     * effectively-final variable that never advanced, so the skip offset was ignored
     * and the page-size bound compared against a constant zero.
     *
     * @param pageSize     rows per page
     * @param pageNum      1-based page index
     * @param columnsBeans column metadata, in row field order
     * @param reader       row source; must not be null
     * @return map with key "list" holding the requested page's rows
     * @throws Exception when the reader is null
     */
    private Map<String, Object> getDataMapByPages(Integer pageSize, Integer pageNum,
                                                  List<TableColumnsBean> columnsBeans, RecordReader<InternalRow> reader) throws Exception {

        HashMap<String, Object> resultMap = new HashMap<>();

        if (ObjectUtils.isEmpty(reader)) {
            throw new Exception("数据读取失败");
        }
        ArrayList<Map<String, Object>> resultList = new ArrayList<>();
        int skip = (pageNum - 1) * pageSize;
        // Single-element array so the lambda can mutate the row counter.
        int[] rowIndex = {0};

        reader.forEachRemaining(internalRow -> {
            int current = rowIndex[0]++;
            if (current < skip || current >= skip + pageSize) {
                return; // outside the requested page
            }
            Map<String, Object> recordMap = new LinkedHashMap<>();
            int j = 0;
            for (TableColumnsBean tableColumnsBean : columnsBeans) {
                String colName = tableColumnsBean.getColumnName();
                Object value = null;
                switch (tableColumnsBean.getDataType()) {
                    case "STRING":
                    case "VARCHAR":
                        value = internalRow.getString(j).toString();
                        break;
                    case "LONG":
                        value = internalRow.getLong(j);
                        break;
                    case "INTEGER":
                    case "INT":
                        value = internalRow.getInt(j);
                        break;
                    case "FLOAT":
                        value = internalRow.getFloat(j);
                        break;
                    case "DOUBLE":
                        value = internalRow.getDouble(j);
                        break;
                    case "DECIMAL":
                        value = internalRow.getDecimal(j, tableColumnsBean.getPrecision(), tableColumnsBean.getScale());
                        break;
                }
                // Unhandled types stay null in the result map (original behavior).
                recordMap.put(colName, value);
                j++;
            }
            resultList.add(recordMap);
        });

        resultMap.put("list", resultList);
        return resultMap;
    }

    /**
     * Reads the latest schema's field names for the table.
     *
     * @return field names, or an empty list when the schema cannot be read
     */
    @NotNull
    private List<String> getColumnNameList(Table table) {
        try {
            String path = table.options().get("path");
            Path dataPath = new Path(path);
            FlinkFileIO fileIO = new FlinkFileIO(dataPath);

            SchemaManager schemaManager = new SchemaManager(fileIO, dataPath);
            Optional<TableSchema> tableSchemaOptional = schemaManager.latest();
            if (tableSchemaOptional.isPresent()) {
                return tableSchemaOptional.get().fieldNames();
            }
        } catch (Exception e) {
            // Was silently swallowed; keep best-effort semantics but record the cause.
            log.warn("getColumnNameList failed, returning empty list", e);
        }
        return new ArrayList<>();
    }

    /**
     * Rounds a double to at most two decimal places; null or zero yields 0.
     *
     * <p>NOTE(review): DecimalFormat output is locale-sensitive — in locales using ','
     * as decimal separator the parse below throws; confirm the JVM default locale.
     */
    private Double formatDouble(Double num) {
        // Plain null check is equivalent to the former ObjectUtils.isEmpty(num).
        if (num == null || num == 0d) {
            return 0d;
        }
        DecimalFormat df = new DecimalFormat("#.##");
        // parseDouble replaces the deprecated new Double(String) constructor.
        return Double.parseDouble(df.format(num));
    }

    /**
     * Drops a table, ignoring the case where it does not exist. Failures are swallowed
     * (best-effort, original contract) but now logged.
     */
    public void deleteTable(String databaseName, String tableName) {
        try {
            // The original's "hive" branch only added a redundant HiveCatalog cast
            // before calling the same dropTable overload; collapsed to one call.
            catalog.dropTable(Identifier.create(databaseName, tableName), true);
        } catch (Exception e) {
            log.warn("deleteTable {}.{} failed", databaseName, tableName, e);
        }
    }

    /**
     * Renames (and optionally moves) a table, ignoring the case where the source does
     * not exist. Failures are swallowed (best-effort, original contract) but now
     * logged.
     */
    public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) {
        try {
            // The original's "hive" branch only added a redundant HiveCatalog cast
            // before calling the same renameTable overload; collapsed to one call.
            catalog.renameTable(Identifier.create(databaseName, tableName),
                    Identifier.create(newDatabaseName, newTableName), true);
        } catch (Exception e) {
            log.warn("renameTable {}.{} -> {}.{} failed", databaseName, tableName, newDatabaseName, newTableName, e);
        }
    }

    /**
     * Recursively accumulates total data size ("tableSize") and file count ("files")
     * under the given directory, skipping Paimon's manifest/schema/snapshot dirs.
     *
     * @param tableSizeMap accumulator; the "tableSize" entry is created on demand
     * @param filesMap     accumulator; the "files" entry is created on demand
     * @throws IOException on HDFS listing errors
     */
    public void getMetrics(String dataPath, FileSystem fileSystem, Map<String, Double> tableSizeMap, Map<String, Integer> filesMap)
            throws IOException {
        FileStatus[] fileStatuses = fileSystem.listStatus(new org.apache.hadoop.fs.Path(dataPath));
        for (FileStatus fileStatus : fileStatuses) {
            String pathname = fileStatus.getPath().getName();
            if ("manifest".equalsIgnoreCase(pathname) || "schema".equalsIgnoreCase(pathname) || "snapshot".equalsIgnoreCase(pathname)) {
                continue;
            }
            if (fileStatus.isDirectory()) {
                getMetrics(dataPath + "/" + pathname + "/", fileSystem, tableSizeMap, filesMap);
            } else {
                // merge() initializes missing entries; the original NPE'd unless the
                // caller pre-seeded both keys.
                tableSizeMap.merge("tableSize", (double) fileStatus.getLen(), Double::sum);
                filesMap.merge("files", 1, Integer::sum);
            }
        }
    }

    /**
     * Ensures the table's "/manifest/" directory exists on HDFS with rwxrwxrwx
     * permissions.
     *
     * @throws IOException on filesystem errors
     */
    public void createMetadataPath(String databaseName, String tableName) throws IOException {
        Table table = getTable(Identifier.create(databaseName, tableName));
        String manifestDir = table.options().get("path") + "/manifest/";

        FileSystem fs = FileSystem.get(URI.create(manifestDir), new Configuration());
        FsPermission allAccess = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
        fs.mkdirs(new org.apache.hadoop.fs.Path(manifestDir), allAccess);
    }


    /**
     * Pages through the names of files under the table's "/manifest/" directory,
     * optionally filtered by a {@code fileName} substring.
     *
     * @throws IOException on filesystem errors
     */
    public Map<String, Object> getMetadataFiles(String databaseName, String tableName, String fileName,
                                                Integer pageSize, Integer pageNum) throws IOException {
        Table table = getTable(Identifier.create(databaseName, tableName));
        String manifestDir = table.options().get("path") + "/manifest/";
        FileSystem fs = FileSystem.get(URI.create(manifestDir), new Configuration());
        return getMetadataFiles(manifestDir, fs, fileName, pageSize, pageNum);
    }


    /**
     * Returns the decoded content of one avro manifest file of the table.
     *
     * @param fileName manifest file name under the table's "/manifest/" directory
     * @throws Exception on read/decode errors
     */
    public String getMetadata(String databaseName, String tableName, String fileName) throws Exception {
        Table table = getTable(Identifier.create(databaseName, tableName));
        String manifestFile = table.options().get("path") + "/manifest/" + fileName;
        // Manifests are read as avro; the former orc/plain-text fallbacks were removed.
        return PDHdfsAvroUtils.catAvroFile(manifestFile);
    }

    /**
     * Lists file names directly under {@code metaPath}, filtered by an optional
     * substring, sorted, and paged. Result keys: "total" (pre-paging count) and
     * "data" (the requested page).
     *
     * @throws IOException on filesystem errors
     */
    private Map<String, Object> getMetadataFiles(String metaPath, FileSystem fileSystem, String fileName, Integer pageSize, Integer pageNum)
            throws IOException {
        HashMap<String, Object> result = new HashMap<>();
        List<String> files =
                Arrays.stream(fileSystem.listStatus(new org.apache.hadoop.fs.Path(metaPath)))
                        .filter(FileStatus::isFile)
                        .map(a -> a.getPath().getName())
                        // Blank-check first: the original evaluated contains(fileName)
                        // before the emptiness test and NPE'd on a null filter.
                        .filter(a -> StringUtils.isEmpty(fileName) || a.contains(fileName))
                        .sorted(Comparator.naturalOrder())
                        .collect(Collectors.toList());
        result.put("total", files.size());
        List<String> viewFiles =
                files.stream()
                        .skip((long) (pageNum - 1) * pageSize)
                        .limit(pageSize)
                        .collect(Collectors.toList());
        result.put("data", viewFiles);
        return result;
    }

    /**
     * Creates a table with the default single-bucket layout. Delegates to the full
     * overload with options {"bucket": "1"}.
     */
    public void createTable(String databaseName, String tableName, List<Map<String, String>> createData,
                            List<Map<String, String>> partitionParam) {
        HashMap<String, String> defaultOptions = new HashMap<>();
        defaultOptions.put("bucket", "1");
        createTable(databaseName, tableName, createData, partitionParam, defaultOptions);
    }

    /**
     * Creates a Paimon table from column descriptors. Each entry of {@code createData}
     * may carry both a current and a "new*" value for name/comment/key flags; the
     * "new*" value wins when present. Validates duplicate column names and that every
     * partition source column is a table column, then builds the schema
     * (partial-update merge engine) and creates the table in the configured catalog.
     *
     * @throws RuntimeException on validation failure or any catalog error
     */
    public void createTable(String databaseName, String tableName, List<Map<String, String>> createData,
                            List<Map<String, String>> partitionParam, Map<String, String> optionsMap) {
        // Validate: no duplicate column names.
        List<String> columnNameList = createData.stream()
                .map(stringStringMap -> stringStringMap.get("columnName")).collect(Collectors.toList());
        List<String> uniColumnNameList = columnNameList.stream().distinct().collect(Collectors.toList());
        if (uniColumnNameList.size() != columnNameList.size()) {
            throw new RuntimeException("字段存在重复");
        }
        // Validate: every partition source column must be a table column.
        List<String> partitionNameList = partitionParam.stream()
                .map(stringStringMap -> stringStringMap.get("sourceName")).collect(Collectors.toList());

        if (!new HashSet<>(columnNameList).containsAll(partitionNameList)) {
            throw new RuntimeException("分区字段必须是表字段！");
        }

        try {
            Schema.Builder schemaBuilder = Schema.newBuilder();

            List<String> primaryKeys = new ArrayList<>();
            List<String> partitionKeys = new ArrayList<>();

            for (Map<String, String> entry : createData) {
                if (MapUtils.isEmpty(entry)) {
                    continue;
                }
                String columnName = entry.get("columnName");
                String newColumnName = entry.get("newColumnName");
                String realColumnName = StringUtils.isNoneBlank(newColumnName) ? newColumnName : columnName;

                String comment = entry.get("comment");
                String newComment = entry.get("newComment");
                String realComment = StringUtils.isNoneBlank(newComment) ? newComment : comment;

                String isPrimaryKey = entry.get("isPrimaryKey");
                String newIsPrimaryKey = entry.get("newIsPrimaryKey");
                String realIsPrimaryKey = StringUtils.isNoneBlank(newIsPrimaryKey) ? newIsPrimaryKey : isPrimaryKey;

                if ("1".equals(realIsPrimaryKey)) {
                    primaryKeys.add(realColumnName);
                }
                String isPartitionKey = entry.get("isPartitionKey");
                String newIsPartitionKey = entry.get("newIsPartitionKey");
                String realIsPartitionKey = StringUtils.isNoneBlank(newIsPartitionKey) ? newIsPartitionKey : isPartitionKey;

                if ("1".equals(realIsPartitionKey)) {
                    // BUGFIX: the original added the flag string ("1") instead of the
                    // column name. The schema currently uses partitionNameList below,
                    // so this list was latent, but it is now at least correct.
                    partitionKeys.add(realColumnName);
                }
                // NOTE(review): isNullable/newIsNullable flags were computed here but
                // never applied to the schema; nullability handling is still TODO.

                DataType paimonDataType = PDPaimonColumnUtils.transform(entry);
                schemaBuilder.column(realColumnName, paimonDataType, realComment);
            }
            if (!primaryKeys.isEmpty()) {
                schemaBuilder.primaryKey(primaryKeys);
            }

            if (!partitionNameList.isEmpty()) {
                schemaBuilder.partitionKeys(partitionNameList);
            }
            schemaBuilder.option("merge-engine", "partial-update");

            // bucket = -1 means dynamic bucketing (bucket chosen at write time).
            if (optionsMap.containsKey("bucket")) {
                schemaBuilder.option("bucket", optionsMap.get("bucket"));
            }
            if (optionsMap.containsKey("bucketKey")) {
                schemaBuilder.option("bucket-key", optionsMap.get("bucketKey"));
            }

            Schema schema = schemaBuilder.build();

            Identifier tableIdentifier = Identifier.create(databaseName, tableName);
            if ("hive".equals(type)) {
                ((HiveCatalog) catalog).createTable(tableIdentifier, schema, true);
            } else if ("jdbc".equals(type)) {
                ((JdbcCatalog) catalog).createTable(tableIdentifier, schema, true);
            } else if ("hadoop".equals(type)) {
                ((FileSystemCatalog) catalog).createTable(tableIdentifier, schema, true);
            } else {
                catalog.createTable(tableIdentifier, schema, true);
            }

            createMetadataPath(databaseName, tableName);

        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Returns a Spark "CREATE TABLE ... USING paimon" statement for the table.
     *
     * <p>NOTE(review): this is a hard-coded placeholder — the column list
     * "(id int, name string)" and the tblproperties ignore the actual {@code table}
     * schema. Build the real DDL from the table schema before exposing this.
     */
    public String getCreateTableSqlWithSpark(String databaseName, String tableName, Table table) throws Exception {
        String tableSql = "create table " + databaseName + "." + tableName + " ( id int, name string) using paimon tblproperties (\"table.props\" = \"val\");";

        return tableSql;
    }

    /**
     * Builds a Flink "create table if not exists" DDL statement from the table's
     * latest schema: columns, primary key (NOT ENFORCED), partition keys, options and
     * comment. Returns only the bare prefix when no schema file exists yet.
     *
     * @throws Exception on schema access errors
     */
    public String getCreateTableSqlWithFlink(String databaseName, String tableName, Table table) throws Exception {
        // StringBuilder replaces the original String += concatenation in loops;
        // every emitted fragment is byte-identical to the original output.
        StringBuilder tableSql = new StringBuilder(" create table if not exists " + databaseName + "." + tableName);
        Path dataPath = new Path(table.options().get("path"));
        FlinkFileIO fileIO = new FlinkFileIO(dataPath);

        SchemaManager schemaManager = new SchemaManager(fileIO, dataPath);
        Optional<TableSchema> tableSchemaOptional = schemaManager.latest();
        if (tableSchemaOptional.isPresent()) {
            TableSchema tableSchema = tableSchemaOptional.get();

            List<String> columns = new ArrayList<>();
            for (DataField field : tableSchema.fields()) {
                columns.add(field.name() + " " + field.type().toString());
            }
            List<String> primaryKeys = tableSchema.primaryKeys();
            if (!primaryKeys.isEmpty()) {
                columns.add("PRIMARY KEY (" + String.join(",", primaryKeys) + ") NOT ENFORCED");
            }
            tableSql.append("(").append(String.join(",", columns)).append(")");

            List<String> partitionKeys = tableSchema.partitionKeys();
            if (!partitionKeys.isEmpty()) {
                tableSql.append(" PARTITIONED BY (").append(String.join(",", partitionKeys)).append(")");
            }

            Map<String, String> options = tableSchema.options();
            if (!options.isEmpty()) {
                List<String> optionsList = new ArrayList<>();
                for (Map.Entry<String, String> option : options.entrySet()) {
                    optionsList.add("'" + option.getKey() + "' = '" + option.getValue() + "' ");
                }
                tableSql.append(" with (").append(String.join(",", optionsList)).append(" ) ");
            }

            String comment = tableSchema.comment();
            if (StringUtils.isNoneBlank(comment)) {
                tableSql.append(" COMMENT '").append(comment).append("' ");
            }
        }
        return tableSql.toString();
    }

    /**
     * Dispatches DDL generation by engine type; anything other than "spark"
     * (including "flink") produces Flink DDL.
     *
     * @throws Exception on schema access errors
     */
    public String getCreateTableSql(String databaseName, String tableName, String execType) throws Exception {
        Table table = getTable(Identifier.create(databaseName, tableName));
        if ("spark".equals(execType)) {
            return getCreateTableSqlWithSpark(databaseName, tableName, table);
        }
        return getCreateTableSqlWithFlink(databaseName, tableName, table);
    }


    /**
     * Rebuilds a table with a new column/partition definition. A "_tmp" table is
     * created first as a dry run — if that succeeds, the real table is dropped and
     * recreated, and the temp table removed.
     *
     * <p>NOTE(review): dropping and recreating loses all existing table data, and a
     * failure between delete and create leaves the table missing — confirm callers
     * accept this. The transaction-count guard is currently commented out.
     */
    public void updateTable(String databaseName, String tableName, List<Map<String, String>> createData,
                            List<Map<String, String>> partitionParam) {
        List<TableTransactionsBean> transactionsMessage = getTransactionsMessage(databaseName, tableName);
//        if (CollectionUtils.isEmpty(transactionsMessage)) {
        // Create a temp table first; if that succeeds, the definition is valid.
        createTable(databaseName, tableName + "_tmp", createData, partitionParam);
        deleteTable(databaseName, tableName);
        createTable(databaseName, tableName, createData, partitionParam);
        deleteTable(databaseName, tableName + "_tmp");
        return;
//        }

    }

    /**
     * Checks whether the given table exists in the catalog.
     * (Method name kept as-is — "isExitTable" — for caller compatibility.)
     *
     * @param databaseName database to look in
     * @param tableName    table to check
     * @return true if the table exists
     */
    public Boolean isExitTable(String databaseName, String tableName) {
        Identifier identifier = Identifier.create(databaseName, tableName);
        return catalog.tableExists(identifier);
    }

    /**
     * Reads every row of the given table and renders each row as a single
     * delimited string, optionally preceded by a header row of column names.
     *
     * NOTE(review): the reading logic (previously Iceberg-based, see VCS
     * history) has been disabled pending a Paimon reimplementation, so this
     * currently ALWAYS returns an empty list. Parameters are kept for
     * interface compatibility.
     *
     * @param databaseName database the table lives in
     * @param tableName    table to read
     * @param splitStr     column delimiter used when rendering each row
     * @param isHead       "1" to emit a header row of column names first
     * @return the rendered rows (currently always empty)
     * @throws Exception reserved for the future implementation
     */
    public ArrayList<String> getAllTableData(String databaseName, String tableName, String splitStr, String isHead) throws Exception {
        // TODO: reimplement table scanning with the Paimon ReadBuilder API;
        // the old Iceberg-based implementation was removed as dead code.
        return new ArrayList<>();
    }

    /**
     * Exports every row of the given table to the supplied HDFS output stream,
     * one delimited line per row, optionally preceded by a header line.
     *
     * NOTE(review): the export logic (previously Iceberg-based, see VCS
     * history) has been disabled pending a Paimon reimplementation, so this
     * currently writes NOTHING. The stream is still closed here because this
     * method takes ownership of it — the original implementation closed it in
     * a finally block and callers rely on that.
     *
     * @param databaseName      database the table lives in
     * @param tableName         table to export
     * @param fileOutPutStream  destination stream; closed by this method
     * @param splitStr          column delimiter for each rendered row
     * @param isHead            "1" to write a header line of column names first
     * @throws Exception if closing the stream fails
     */
    public void tableDataToFileWithBranch(String databaseName, String tableName, FSDataOutputStream fileOutPutStream,
                                          String splitStr, String isHead) throws Exception {
        // TODO: reimplement with the Paimon ReadBuilder API.
        if (fileOutPutStream != null) {
            fileOutPutStream.close();
        }
    }

    /**
     * Replaces a single row: intended to delete {@code oldData} and then insert
     * {@code newData}.
     *
     * NOTE(review): the delete step is currently DISABLED (the deleteTableData
     * call was commented out), so this only appends {@code newData}. For a
     * primary-key table the insert effectively upserts; for an append-only
     * table the old row remains. Confirm this is intentional before relying on
     * it as a true update.
     *
     * @param databaseName database the table lives in
     * @param tableName    table to update
     * @param oldData      row to remove (currently unused — see note above)
     * @param newData      row to insert
     * @throws Exception if the insert fails
     */
    public void updateData(String databaseName, String tableName,
                           HashMap<String, String> oldData, HashMap<String, String> newData) throws Exception {
        ArrayList<HashMap<String, String>> newDataList = new ArrayList<>();
        newDataList.add(newData);
        addTableData(databaseName, tableName, newDataList);
    }

    /**
     * Imports delimited text read from an HDFS file into the target table.
     *
     * NOTE(review): the load logic (previously Iceberg/Parquet-based, see VCS
     * history) has been disabled pending a Paimon reimplementation, so this
     * currently imports NOTHING and returns 0. The reader is still closed here
     * because this method takes ownership of it — the original implementation
     * closed it in a finally block, and callers such as syncHiveToPaimonTable
     * and syncHDFSToPaimonTable rely on that to avoid leaking HDFS streams.
     *
     * @param databaseName       target database
     * @param tableName          target table
     * @param inputStreamReader  source data; closed by this method
     * @param splitStr           column delimiter in the input file
     * @param isHead             "1" if the first line is a header to be skipped
     * @param addStr             extra trailing column values (e.g. Hive
     *                           partition values); may be null
     * @return number of rows imported (currently always 0)
     * @throws Exception if closing the reader fails
     */
    public int addTableDataFromHDFSFile(String databaseName, String tableName, InputStreamReader inputStreamReader,
                                        String splitStr, String isHead, List<String> addStr) throws Exception {
        // TODO: reimplement with the Paimon BatchWriteBuilder API; the old
        // Iceberg DataWriter implementation was removed as dead code.
        if (inputStreamReader != null) {
            inputStreamReader.close();
        }
        return 0;
    }

    /**
     * Imports every data file under a Hive table directory into a Paimon table.
     * Hive lays out partitioned data as {@code <tablePath>/k1=v1/k2=v2/file};
     * the partition VALUES are extracted and appended as trailing columns.
     *
     * NOTE(review): readers are built with the platform default charset —
     * confirm whether the source files are UTF-8 and pass an explicit charset
     * if so.
     *
     * @param pdHdfsUtils  HDFS helper providing the file system and listing
     * @param tablePath    root directory of the Hive table
     * @param splitStr     column delimiter inside the data files
     * @param distdatabase target Paimon database
     * @param disttable    target Paimon table
     * @return total number of rows imported
     * @throws Exception if listing, reading, or importing fails
     */
    public int syncHiveToPaimonTable(PDHdfsUtils pdHdfsUtils, String tablePath, String splitStr,
                                     String distdatabase, String disttable) throws Exception {
        int count = 0;
        ArrayList<String> filePaths = new ArrayList<>();
        FileSystem fileSystem = pdHdfsUtils.getFileSystem();
        pdHdfsUtils.getFiles(tablePath, filePaths);
        String prefix = tablePath + "/";
        for (String filePath : filePaths) {
            // Strip the table-root prefix literally. The previous
            // split(tablePath + "/") treated the path as a REGEX, so metachars
            // in the path (e.g. '.') could mis-split.
            String partitionStr = filePath.substring(filePath.indexOf(prefix) + prefix.length());
            List<String> addList = new ArrayList<>();
            if (partitionStr.contains("/")) {
                String[] segments = partitionStr.split("/");
                // All segments but the last (the file name) are k=v partition dirs.
                for (int i = 0; i < segments.length - 1; i++) {
                    addList.add(segments[i].split("=")[1]);
                }
            }
            FSDataInputStream fsDataInputStream = fileSystem.open(new org.apache.hadoop.fs.Path(filePath));
            InputStreamReader inputStreamReader;
            try {
                inputStreamReader = filePath.endsWith(".gz")
                        ? new InputStreamReader(new GZIPInputStream(fsDataInputStream))
                        : new InputStreamReader(fsDataInputStream);
            } catch (Exception e) {
                // Don't leak the raw HDFS stream if GZIP header parsing fails.
                fsDataInputStream.close();
                throw e;
            }
            try {
                count += addTableDataFromHDFSFile(distdatabase, disttable, inputStreamReader, splitStr, "0", addList);
            } finally {
                // Closing the reader also closes the underlying HDFS stream;
                // previously nothing here closed it and the callee is a stub.
                inputStreamReader.close();
            }
        }
        return count;
    }

    /**
     * Imports a single delimited HDFS file into the target Paimon table.
     *
     * @param pdHdfsUtils  HDFS helper providing the file system
     * @param syncdata     HDFS path of the file to import
     * @param splitStr     column delimiter inside the file
     * @param distdatabase target Paimon database
     * @param disttable    target Paimon table
     * @return number of rows imported
     * @throws Exception if opening, reading, or importing fails
     */
    public int syncHDFSToPaimonTable(PDHdfsUtils pdHdfsUtils, String syncdata, String splitStr,
                                     String distdatabase, String disttable) throws Exception {
        FileSystem fileSystem = pdHdfsUtils.getFileSystem();
        // try-with-resources guarantees the HDFS stream is released even when
        // the import throws; previously the stream was never closed here.
        try (FSDataInputStream in = fileSystem.open(new org.apache.hadoop.fs.Path(syncdata));
             InputStreamReader reader = new InputStreamReader(in)) {
            return addTableDataFromHDFSFile(distdatabase, disttable, reader, splitStr, "0", null);
        }
    }

    /**
     * Copies all rows of a MySQL table into a Paimon table via a batch
     * overwrite commit.
     *
     * SECURITY NOTE(review): {@code mysqlTable} is concatenated directly into
     * the query. Table identifiers cannot be bound as PreparedStatement
     * parameters, so this value MUST come from a trusted/whitelisted source.
     *
     * @param connection   open MySQL connection (NOT closed by this method)
     * @param mysqlTable   source MySQL table name (trusted input only)
     * @param databaseName target Paimon database
     * @param tableName    target Paimon table
     * @return number of rows written
     * @throws Exception if reading from MySQL or writing to Paimon fails; the
     *                   original cause is preserved
     */
    public int syncMysqlToPaimonTable(Connection connection, String mysqlTable, String databaseName,
                                      String tableName) throws Exception {
        int count = 0;
        // TODO: page through large tables instead of a single full scan.
        String mysqlDataSql = "select * from " + mysqlTable;
        // try-with-resources: previously Statement/ResultSet leaked when
        // executeQuery threw, because they were created before the try block.
        try (Statement statement = connection.createStatement();
             ResultSet resultSet = statement.executeQuery(mysqlDataSql)) {
            Table table = getTable(Identifier.create(databaseName, tableName));
            Map<String, String> options = table.options();
            JSONObject optionsJson = new JSONObject(options);
            // bucket > 0 means fixed bucketing (Paimon assigns the bucket);
            // otherwise (dynamic bucket) the bucket index must be supplied on write.
            Integer bucket = options.containsKey("bucket") ? optionsJson.getInteger("bucket") : 1;

            List<TableColumnsBean> columnsOfTable = getColumnsOfTable(databaseName, tableName);
            int fieldSize = columnsOfTable.size();
            BatchWriteBuilder writeBuilder = table.newBatchWriteBuilder().withOverwrite();

            List<CommitMessage> messages;
            // BatchTableWrite is AutoCloseable; close it once the commit
            // messages are prepared so writer resources are released.
            try (BatchTableWrite write = writeBuilder.newWrite()) {
                while (resultSet.next()) {
                    GenericRow row = new GenericRow(RowKind.INSERT, fieldSize);
                    for (int i = 0; i < fieldSize; i++) {
                        int jdbcIndex = i + 1; // JDBC columns are 1-based
                        String dataType = columnsOfTable.get(i).getDataType();
                        PaimonBaseType paimonBaseType = PaimonBaseType.getType(dataType);
                        Object value = null;
                        switch (paimonBaseType) {
                            case STRING:
                                value = BinaryString.fromString(resultSet.getString(jdbcIndex));
                                break;
                            case BOOLEAN:
                                value = resultSet.getBoolean(jdbcIndex);
                                break;
                            case FLOAT:
                                value = resultSet.getFloat(jdbcIndex);
                                break;
                            case DOUBLE:
                                value = resultSet.getDouble(jdbcIndex);
                                break;
                            case LONG:
                                value = resultSet.getLong(jdbcIndex);
                                break;
                            case INTEGER:
                                value = resultSet.getInt(jdbcIndex);
                                break;
                            case DATE:
                                value = resultSet.getDate(jdbcIndex);
                                break;
                            default:
                                // Unknown types are written as null, matching
                                // the previous behavior.
                        }
                        row.setField(i, value);
                    }
                    if (bucket > 0) {
                        write.write(row);
                    } else {
                        // TODO: choose the bucket by data volume instead of a
                        // fixed index once a sizing strategy exists.
                        write.write(row, 1);
                    }
                    count++;
                }
                messages = write.prepareCommit();
            }

            // Collect all CommitMessages and commit once.
            try (BatchTableCommit commit = writeBuilder.newCommit()) {
                commit.commit(messages);
            }
        } catch (Exception e) {
            // Keep the original message but preserve the cause chain instead
            // of flattening it to getMessage() (which dropped the stack trace).
            throw new Exception(e.getMessage(), e);
        }
        return count;
    }
}
