package belf.migrate.engine.job.jdbc;

import belf.migrate.api.common.JobMode;
import belf.migrate.api.job.Job;
import belf.migrate.api.model.QualifiersModel;
import belf.migrate.api.sink.jdbc.helper.ColumnRuleHelper;
import belf.migrate.api.table.schema.Catalog;
import belf.migrate.api.table.schema.TablePath;
import belf.migrate.api.taskconf.ConnectionConf;
import belf.migrate.api.taskconf.TableSyncConf;
import belf.migrate.api.taskconf.Task;
import belf.migrate.api.type.LogLevelType;
import belf.migrate.api.util.PostLog;
import belf.migrate.engine.job.*;
import com.alibaba.druid.pool.DruidDataSource;
import lombok.extern.slf4j.Slf4j;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import static belf.migrate.api.job.JobCode.FAILTURE;
import static belf.migrate.api.job.JobCode.SUCCESS;

@Slf4j
public class JDBCJobRunner extends BaseJobRunner {
    /** Pool running JDBCReader tasks; one shared pool per execute() invocation. */
    private ExecutorService readThreadPool;
    /** Pool running JDBCWriter tasks; one shared pool per execute() invocation. */
    private ExecutorService writeThreadPool;

    public JDBCJobRunner(Job job) {
        super(job);
    }

    /**
     * Runs a batch JDBC data-sync job: validates the task, registers it with
     * {@link JDBCJobManager}, then for each configured table counts the source rows
     * and submits reader task(s) (optionally sliced into row ranges) plus one writer
     * task, connected through an in-memory {@link Pipe}.
     *
     * <p>Note: this method only <em>submits</em> the reader/writer tasks; they keep
     * running after it returns. Call {@link #shutdown()} to stop the pools.
     *
     * @return SUCCESS once all tasks are submitted; FAILTURE when validation fails,
     *         a conflicting job is already running, or submission throws
     */
    @Override
    public JobResult execute() {
        log.info("JDBCJob Start...");
        JobResult jobResult = check();
        if (jobResult.getCode() != SUCCESS) {
            return jobResult;
        }

        // This runner only handles batch data-sync tasks.
        if (job.getTask().getTaskType() != Task.TaskType.SYNC_DATA || job.getTask().getJobMode() != JobMode.BATCH) {
            return new JobResult(FAILTURE, "Job Task TaskType & JobMode Mismatch.");
        }

        // Reject the job if a SYNC_DATA & BATCH job on the same source/sink pair is already running.
        jobResult = JDBCJobManager.getInstance().startJob(this);
        if (jobResult.getCode() != SUCCESS) {
            return jobResult;
        }

        ColumnRuleHelper columnRuleHelper = new ColumnRuleHelper(taskConf.getMapping());

        // Create the pools ONCE, shared by every table. The previous code created a
        // fresh pool per table and overwrote the fields inside the loop, so
        // shutdown() could only reach the last pair and every earlier pool leaked
        // its threads. A cached pool preserves the original concurrency (each
        // reader slice / writer still runs on its own thread).
        readThreadPool = Executors.newCachedThreadPool();
        writeThreadPool = Executors.newCachedThreadPool();

        try (Catalog sourceCatalog = CatalogCreator.createSourceCatalog(taskConf);
             Catalog sinkCatalog = CatalogCreator.createSinkCatalog(taskConf)) {

            String sourceDatabaseName = job.getTask().getTaskConf().getSourceConf().getDatabaseName();
            String sourceSchemaName = job.getTask().getTaskConf().getSourceConf().getSchemaName();
            String sinkDatabaseName = job.getTask().getTaskConf().getSinkConf().getDatabaseName();
            String sinkSchemaName = job.getTask().getTaskConf().getSinkConf().getSchemaName();

            QualifiersModel sourceQualifiersModel = jobContext.getSourceQualifiersModel();
            QualifiersModel sinkQualifiersModel = jobContext.getSinkQualifiersModel();

            List<TableSyncConf> jdbc = taskConf.getJdbc();
            for (TableSyncConf tableSyncConf : jdbc) {
                String tableName = tableSyncConf.getTableName();
                log.info("Start Migration Table: {}, Conf: {}", tableName, tableSyncConf.toJson());
                TablePath sourceTablePath = TablePath.of(sourceDatabaseName, sourceSchemaName, tableName, sourceQualifiersModel);
                TablePath sinkTablePath = TablePath.of(sinkDatabaseName, sinkSchemaName, tableName, sinkQualifiersModel);

                // In-memory exchange buffer between the JDBC reader(s) and the writer.
                Pipe pipe = new MemPipe(sourceTablePath.getFullNameWithQuoted());
//                Pipe pipe = new RedisPipe(sourceTablePath.getFullNameWithQuoted());

                // Reader side: optionally slice the table into row ranges.
                Integer slice = tableSyncConf.getSlice();
                int tableRows = countTableRows(sourceTablePath.getFullNameWithQuoted());
                pipe.setTotal(tableRows);
                PostLog.sendLogModel(job.getJobId(), job.getTask().getTaskType().getName(), pipe.getStatus(), LogLevelType.INFO.getType(), SUCCESS,
                        jobContext.getProgress());
                if (null != slice && slice > 1 && tableRows > 0) {             // sliced read
                    int sliceSize = (tableRows + slice - 1) / slice;           // rows per slice, rounded up
                    for (int i = 0; i < slice; i++) {
                        int sliceStart = i * sliceSize;
                        int sliceEnd = Math.min(sliceStart + sliceSize, tableRows);  // last slice may be short
                        log.info("Table: {}, Rows: {}, Slice: {}, SliceSize: {}, SliceStart: {}, SliceEnd: {}", tableName, tableRows, slice, sliceSize, sliceStart, sliceEnd);
                        // Submit the Runnable directly. The previous code wrapped it in a
                        // new Thread, called setDaemon(true) and submitted that Thread to
                        // the pool — the pool then ran it as a plain Runnable on its own
                        // worker, so the daemon flag never applied and the wrapper was
                        // pure overhead.
                        readThreadPool.submit(new JDBCReader(job, jobContext, tableSyncConf, pipe, sliceStart, sliceEnd, columnRuleHelper));
                    }
                } else {                                                       // unsliced read
                    readThreadPool.execute(new JDBCReader(job, jobContext, tableSyncConf, pipe, 0, tableRows, columnRuleHelper));
                }

                // Writer side: one writer per table draining the pipe.
                writeThreadPool.execute(new JDBCWriter(job, jobContext, tableSyncConf, pipe, columnRuleHelper));
            }

        } catch (Exception e) {
            log.error("JDBCJob Exception", e);
            // Previously the stale SUCCESS jobResult was returned here, so callers
            // believed a failed submission had succeeded. Report the failure.
            return new JobResult(FAILTURE, "JDBCJob Exception: " + e.getMessage());
        }
        log.info("JDBCJob Submitted.");
        return jobResult;
    }

    /**
     * Requests an orderly shutdown of the reader and writer pools: previously
     * submitted tasks finish, no new tasks are accepted. Safe to call even if
     * {@link #execute()} never ran (pools may be null).
     */
    public void shutdown() {
        if (Objects.nonNull(readThreadPool)) {
            readThreadPool.shutdown();
        }
        if (Objects.nonNull(writeThreadPool)) {
            writeThreadPool.shutdown();
        }
    }

    /**
     * Builds a Druid connection pool from the given connection configuration.
     * The caller owns the returned pool and must close it when done.
     *
     * @param connectionConf JDBC url / user / password / driver class
     * @return a configured, not-yet-initialized {@link DruidDataSource}
     */
    public static DruidDataSource initDataSource(ConnectionConf connectionConf) {
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setUrl(connectionConf.getUrl());
        dataSource.setUsername(connectionConf.getUser());
        dataSource.setPassword(connectionConf.getPassword());
        dataSource.setDriverClassName(connectionConf.getDriver());
        dataSource.setInitialSize(1);  // initial pooled connections
        dataSource.setMaxActive(10);   // max pooled connections
        dataSource.setMinIdle(1);      // min idle connections
        dataSource.setMaxWait(60000);  // max wait for a connection (ms)
        return dataSource;
    }


    /**
     * Counts the rows of the given source table with {@code SELECT COUNT(*)}.
     *
     * @param fullTableName fully qualified, already-quoted table name
     * @return the row count, or 0 when the query fails (the error is logged)
     */
    private int countTableRows(String fullTableName) {
        int rows = 0;
        // NOTE: the table name is an identifier and cannot be bound as a
        // PreparedStatement parameter; it comes from the task configuration and is
        // quoted upstream by TablePath — do not pass untrusted input here.
        String sql = "SELECT COUNT(*) FROM " + fullTableName;
        // Close the DruidDataSource as well: the previous version created a new
        // pool per call and never closed it, leaking physical connections for
        // every table counted.
        try (DruidDataSource druidDataSource = initDataSource(taskConf.getSourceConf().getConnectionConf());
             Connection connection = druidDataSource.getConnection();
             Statement statement = connection.createStatement();
             ResultSet resultSet = statement.executeQuery(sql)) {
            if (resultSet.next()) {
                rows = resultSet.getInt(1);
            }
        } catch (SQLException e) {
            log.error("SQLException SQL: {}", sql, e);
        }
        log.info("Table: {}, Rows: {}", fullTableName, rows);
        return rows;
    }
}
