package belf.migrate.engine;

import belf.migrate.api.sink.jdbc.JdbcSink;
import belf.migrate.api.source.cdc.CDCSource;
import belf.migrate.api.taskconf.JobContext;
import belf.migrate.api.taskconf.TaskConf;
import belf.migrate.core.util.ConfigUtil;
import belf.migrate.engine.admin.SparkServer;
import belf.migrate.api.job.Job;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;

import java.io.File;
import java.io.IOException;
import java.util.*;

/**
 * Singleton holding the context the Engine needs at runtime: the HTTP server, all
 * TaskConf and Job instances, and the per-task/per-job execution context:
 * <ul>
 *    <li>httpServer — HTTP server exposing the external API</li>
 *    <li>jobContextMap — execution context required to run each Job</li>
 *    <li>cdcSources — source-side database connectors</li>
 *    <li>jdbcSinks — sink-side database connectors</li>
 *    <li>jobs — currently running Jobs</li>
 * </ul>
 */
@Slf4j
public class BelfEvn {

    private static final BelfEvn instance = new BelfEvn();

    /** RPC Server提供Console和引擎之间的RPC通信，可基于Web Console实现对迁移任务的管理 */

    /** HTTP Server提供基于HTTP的API，可实现任务管理、工作调度、状态监测等功能 */
    private SparkServer httpServer;

    /**
     * 保存所有的TaskConf。主键为：souceCatalog-sinkCatalog-taskId。<br/> 只有本地测试才需要从conf/task目录下加载TaskConf。生产环境是从console发起HTTP
     * POST请求，提交Job执行。
     */
    private final Map<String, TaskConf> taskConfMap = new HashMap<>();

    /** 保存所有的JobContext。主键为jobId */
    private final Map<String, JobContext> jobContextMap = new HashMap<>();

    /** 允许同时执行多个Debezium CDC同步任务 */
    private final List<CDCSource> cdcSources = new ArrayList<>();

    /** 允许同时执行多个目标端数据库写入任务。原则上和cdcSources是一一对应 */
    private final List<JdbcSink> jdbcSinks = new ArrayList<>();

    /** 保存console提交的任务 */
    private final List<Job> jobs = new ArrayList<>();

    private BelfEvn() {
        //加载所有的TaskConf
        File path = new File(ConfigUtil.getTaskDir());
        if (path.exists()){
        Arrays.stream(
                        path.list())
                .filter(name -> name.endsWith(".json"))
                .forEach(filename -> {
                    ObjectMapper mapper = new ObjectMapper();
                    String absolutePath = path.getAbsolutePath() + File.separator + filename;
                    log.info("Load task conf file: {}", absolutePath);
                    TaskConf taskConf = null;
                    try {
                        taskConf = mapper.readValue(new File(absolutePath), TaskConf.class);
                    } catch (IOException e) {
                        e.printStackTrace();
                        log.error("Load TaskConf error：file={}, exception:\n{}", filename, e);
                    }
                    int dotIndex = filename.lastIndexOf(".");
                    addTaskConf(filename.substring(0, dotIndex), taskConf);
                });
        }
    }

    public static BelfEvn getInstance() {
        return instance;
    }

    public void addTaskConf(String taskConfFilename, TaskConf taskConf) {
        taskConfMap.put(taskConfFilename, taskConf);
    }

    public TaskConf getTaskConf(String taskConfFilename) {
        return taskConfMap.get(taskConfFilename);
    }

    public void addJobContext(String jobId, JobContext jobContext) {
        jobContextMap.put(jobId, jobContext);
    }

    public void addCDCSource(CDCSource cdcSource) {
        cdcSources.add(cdcSource);
    }

    public void addJdbcSink(JdbcSink jdbcSink) {
        jdbcSinks.add(jdbcSink);
    }

    public SparkServer getHttpServer() {
        return httpServer;
    }

    public void setHttpServer(SparkServer httpServer) {
        this.httpServer = httpServer;
    }

    public Map<String, TaskConf> getTaskConfMap() {
        return taskConfMap;
    }

    public Map<String, JobContext> getJobContextMap() {
        return jobContextMap;
    }

    public List<CDCSource> getCdcSources() {
        return cdcSources;
    }

    public List<JdbcSink> getJdbcSinks() {
        return jdbcSinks;
    }

    public List<Job> getJobs() {
        return jobs;
    }

    public void addJob(Job job) {
        jobs.add(job);

    }

    public void stop(Job job) {
        String jobId = "" + job.getJobId();
        if (jobContextMap.containsKey(jobId)) {
            JobContext jobContext = jobContextMap.get(jobId);
            try {
                jobContext.getSourceCatalog().close(); //关闭源端数据库连接
            } catch (Exception e){
            }

            try {
                jobContext.getSinkCatalog().close(); //关闭目标端数据库连接
            } catch (Exception e){
            }
        }
        jobContextMap.remove(jobId);

        Iterator<Job> it = jobs.iterator();
        while (it.hasNext()) { //找到缓存的Job对象并删除
            Job tmp = it.next();
            if (tmp.getJobId().equals(job.getJobId())) {
                it.remove();
            }
        }
    }

    public void finishJob(long jobId) {
        // 先终止CDCSource
        Iterator<CDCSource> itSource = BelfEvn.getInstance().getCdcSources().iterator();
        while (itSource.hasNext()) {
            CDCSource source = itSource.next();
            if (source.getJobContext().getJobId().equals("" + jobId)) {
                source.stop();
                itSource.remove();
                log.info("Stop CDCSource for job: {}", jobId);
            }
        }

        // 再终止JdbcSink
        Iterator<JdbcSink> itSink = BelfEvn.getInstance().getJdbcSinks().iterator();
        while (itSink.hasNext()) {
            JdbcSink sink = itSink.next();
            if (sink.getJobContext().getJobId().equals("" + jobId)) {
                sink.stop();
                itSink.remove();
                log.info("Stop JdbcSink for job: {}", jobId);
            }
        }

        // 关闭执行job所需要的上下文资源
        JobContext jobContext = getJobContextMap().get("" + jobId);
        if (null != jobContext) {
            jobContext.release();
        }
        getJobContextMap().remove("" + jobId);
        // 最后删除Job
        Iterator<Job> it = BelfEvn.getInstance().getJobs().iterator();
        while (it.hasNext()) {
            Job job = it.next();
            if (job.getJobId() == jobId) {
                it.remove();
                log.info("Remove job from cache: {}", jobId);
            }
        }
    }

    public void stopAll() {
        Iterator<Job> it = jobs.iterator();
        while (it.hasNext()) { //找到缓存的Job对象并删除
            Job job = it.next();
            String jobId = "" + job.getJobId();

            if (jobContextMap.containsKey(jobId)) {
                JobContext jobContext = jobContextMap.get(jobId);
                try {
                    jobContext.getSourceCatalog().close(); //关闭源端数据库连接
                } catch (Exception e){
                }

                try {
                    jobContext.getSinkCatalog().close(); //关闭目标端数据库连接
                } catch (Exception e){
                }
            }

            jobContextMap.remove(jobId);
            it.remove();
        }
    }
}
