package com.alibaba.otter.node.etl.load.loader;

import com.alibaba.otter.canal.extend.communication.CanalConfigClient;
import com.alibaba.otter.node.common.config.ConfigClientService;
import com.alibaba.otter.node.etl.load.exception.LoadException;
import com.alibaba.otter.node.etl.load.loader.db.DbLoadDumper;
import com.alibaba.otter.node.etl.load.loader.db.context.DataLoadContext;
import com.alibaba.otter.node.etl.load.loader.interceptor.LoadInterceptor;
import com.alibaba.otter.node.etl.load.loader.weight.WeightBuckets;
import com.alibaba.otter.node.etl.load.loader.weight.WeightController;
import com.alibaba.otter.shared.common.model.config.ConfigHelper;
import com.alibaba.otter.shared.common.model.config.channel.Channel;
import com.alibaba.otter.shared.common.model.config.data.DataMediaPair;
import com.alibaba.otter.shared.common.model.config.pipeline.Pipeline;
import com.alibaba.otter.shared.common.utils.thread.NamedThreadFactory;
import com.alibaba.otter.shared.etl.model.*;
import org.apache.commons.lang.exception.ExceptionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.dao.PessimisticLockingFailureException;
import org.springframework.util.CollectionUtils;

import java.sql.Statement;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.*;

public abstract class AbstractLoadAction implements LoadAction, InitializingBean, DisposableBean {

    protected final Logger logger = LoggerFactory.getLogger(getClass());


    // max attempts per failing batch in AbstractWorker before aborting
    protected int retry = 3;

    // base wait in ms between retries (scaled linearly by the attempt number)
    protected int retryWait = 3000;

    // default worker thread pool size
    private final static int DEFAULT_POOL_SIZE = 5;

    // number of records per split when batch loading is enabled
    protected int batchSize = 50;

    // whether batch loading is used (may be overridden per pipeline in adjustConfig)
    protected boolean useBatch = true;

    private int poolSize = DEFAULT_POOL_SIZE;
    // worker thread name (also used as the pool's thread-factory name)
    protected String workName;
    // lifecycle hooks: prepare/commit/error plus per-record "after" notification
    protected LoadInterceptor<LoadContext, ObjectData> interceptor;
    // worker thread pool (created in afterPropertiesSet)
    protected ExecutorService executor;
    // manager configuration client
    protected ConfigClientService configClientService;
    // canal configuration client
    protected CanalConfigClient canalConfigClient;
    // collects per-pair load statistics
    protected LoadStatsTracker loadStatsTracker;


    /**
     * Entry point: loads one {@link RowBatch}. DDL batches are executed
     * directly; DML batches are grouped into weight buckets and loaded in
     * weight order under the given {@link WeightController}'s coordination.
     * Returns the context carrying processed/failed records.
     */
    @Override
    public LoadContext load(RowBatch rowBatch, WeightController controller) {

        Identity identity = rowBatch.getIdentity();
        DataLoadContext context = buildContext(identity, rowBatch.getDatas());

        try {
            List<EventData> datas = rowBatch.getDatas();
            context.setPrepareDatas(datas);
            // re-read the prepared data (NOTE(review): nothing has filtered the
            // data at this point — presumably kept for symmetry with the
            // post-interceptor re-read below; verify setter semantics)
            datas = context.getPrepareDatas();
            if (CollectionUtils.isEmpty(datas)) {
                logger.info("##no event data for load, return");
                return context;
            }

            interceptor.prepare(context);
            // re-read: the interceptor may have filtered duplicate records
            datas = context.getPrepareDatas();
            // Handle DDL. canal guarantees ddl/dml never share a batch, ideally
            // one ddl per batch, so that failure/rollback only concerns that
            // single statement (ddl idempotency concern).
            if (isDdlDatas(datas)) {
                doDdl(context, datas);
                interceptor.commit(context);
                return context;
            }
            // handle DML records
            WeightBuckets<EventData> buckets = buildWeightBuckets(context, datas);
            List<Long> weights = buckets.weights();
            controller.start(weights);// must be called even when weights is empty
            if (CollectionUtils.isEmpty(datas)) {
                logger.info("##no event data for load");
            }
            adjustPoolSize(context); // resize the worker pool per manager config
            adjustConfig(context); // refresh runtime parameters (useBatch/batchSize)
            // load bucket by bucket, in weight order
            for (Long weight : weights) {
                controller.await(weight.intValue());
                // process all records sharing this weight
                List<EventData> items = buckets.getItems(weight);
                logger.debug("##start load for weight:{}", weight);
                // delegate to the concrete loader
                load(context, items);
                controller.single(weight.intValue());
                logger.debug("##end load for weight:{}", weight);
            }
            interceptor.commit(context);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            interceptor.error(context);
        } catch (Exception e) {
            interceptor.error(context);
            throw new LoadException(e);
        }
        return context;// returns the successfully processed records
    }


    /**
     * Loads one weight-bucket of DML records; implementations perform the
     * actual write (e.g. database load) for {@code items}.
     */
    protected abstract void load(DataLoadContext context, List<EventData> items);

    /**
     * Builds the load context for the given identity: resolves channel and
     * pipeline from the manager configuration and, when data is present,
     * resolves the data media of the first record.
     */
    protected DataLoadContext buildContext(Identity identity, List<EventData> eventDataCollection) {
        DataLoadContext context = new DataLoadContext();
        context.setIdentity(identity);
        context.setChannel(configClientService.findChannel(identity.getChannelId()));
        context.setPipeline(configClientService.findPipeline(identity.getPipelineId()));

        // Records were already grouped by DataMediaSource upstream (DbBatchLoader),
        // and each source has its own load action, so the first record's table id
        // is representative for the whole batch.
        if (!CollectionUtils.isEmpty(eventDataCollection)) {
            EventData first = eventDataCollection.get(0);
            context.setDataMedia(ConfigHelper.findDataMedia(context.getPipeline(), first.getTableId()));
        }
        return context;
    }

    /**
     * Dry-run mode: nothing is written; every row is simply counted in the
     * statistics and recorded as processed so downstream bookkeeping stays
     * consistent.
     */
    protected void doDryRun(DataLoadContext context, List<List<EventData>> totalRows) {
        for (List<EventData> rows : totalRows) {
            if (CollectionUtils.isEmpty(rows)) {
                continue; // skip empty buckets
            }

            // record success stats directly, then mark the whole bucket processed
            rows.forEach(row -> processStat(row, context));
            context.getProcessedDatas().addAll(rows);
        }
    }


    /**
     * Two-phase load: phase one runs each bucket in parallel via the worker
     * pool; if any bucket fails, phase two re-runs ALL rows serially with
     * batching disabled so failures can be attributed precisely and no data
     * is lost to inaccurate failed-data bookkeeping in phase one.
     */
    protected void doTwoPhase(DataLoadContext context, List<List<EventData>> totalRows) {
        // phase one: submit each non-empty bucket to the pool.
        // (bugfix) submittedRows mirrors results index-for-index; indexing
        // totalRows directly would mis-align futures with rows whenever an
        // empty bucket was skipped at submit time.
        List<Future<Exception>> results = new ArrayList<>();
        List<List<EventData>> submittedRows = new ArrayList<>();
        for (List<EventData> rows : totalRows) {
            if (CollectionUtils.isEmpty(rows)) {
                continue; // skip empty buckets
            }

            submittedRows.add(rows);
            results.add(executor.submit(getWorker(context, rows, true)));
        }

        boolean partFailed = false;
        for (int i = 0; i < results.size(); i++) {
            Future<Exception> result = results.get(i);
            Exception ex;
            try {
                ex = result.get();
                for (EventData data : submittedRows.get(i)) {
                    interceptor.after(context, data);// notify load completion
                }
            } catch (Exception e) {
                ex = e;
            }

            if (ex != null) {
                logger.warn("##load phase one failed!", ex);
                partFailed = true;
            }
        }

        if (partFailed) {
            // Retry with ALL phase-one rows rather than just failedDatas, to
            // avoid losing data if the failed-data bookkeeping was inaccurate.
            List<EventData> retryEventDatas = new ArrayList<>();
            for (List<EventData> rows : totalRows) {
                retryEventDatas.addAll(rows);
            }

            context.getFailedDatas().clear(); // reset failed data before the retry pass

            // May be null: older manager versions serialize configs without this flag.
            Boolean skipLoadException = context.getPipeline().getParameters().getSkipLoadException();
            if (skipLoadException != null && skipLoadException) {
                // Skip-on-error mode: load one row at a time so only the truly
                // failing rows are skipped, each with a log record.
                for (EventData retryEventData : retryEventDatas) {
                    Callable<Exception> worker = getWorker(context, Collections.singletonList(retryEventData), false);// force non-batch
                    try {
                        Exception ex = worker.call();
                        if (ex != null) {
                            // do skip
                            logger.warn("skip exception for data : {} , caused by {}",
                                    retryEventData,
                                    ExceptionUtils.getFullStackTrace(ex));
                        }
                    } catch (Exception ex) {
                        // do skip
                        logger.warn("skip exception for data : {} , caused by {}",
                                retryEventData,
                                ExceptionUtils.getFullStackTrace(ex));
                    }
                }
            } else {
                // Strict mode: run the whole retry set in one serial worker to
                // minimize thread scheduling; any failure aborts the load.
                Callable<Exception> worker = getWorker(context, retryEventDatas, false);// force non-batch
                try {
                    Exception ex = worker.call();
                    if (ex != null) {
                        throw ex; // rethrown to the handler below
                    }
                } catch (Exception ex) {
                    logger.error("##load phase two failed!", ex);
                    throw new LoadException(ex);
                }
            }

            // notify completion for every retried row
            for (EventData data : retryEventDatas) {
                interceptor.after(context, data);
            }
        }

    }

    /**
     * Creates a worker that loads {@code eventDataCollection}. The returned
     * callable yields {@code null} on success or the failure cause;
     * {@code canBatch=false} forces single-record (non-batch) execution.
     */
    protected abstract Callable<Exception> getWorker(DataLoadContext context, List<EventData> eventDataCollection, boolean canBatch);


    /**
     * Resizes the worker pool to the pipeline's configured load pool size.
     * The order of the core/max updates matters: ThreadPoolExecutor rejects
     * any state where corePoolSize > maximumPoolSize, so when growing we must
     * raise the maximum first, and when shrinking lower the core first —
     * the previous max-first-always order threw IllegalArgumentException on
     * shrink.
     */
    protected void adjustPoolSize(DataLoadContext context) {
        Pipeline pipeline = context.getPipeline();
        int newPoolSize = pipeline.getParameters().getLoadPoolSize();
        if (newPoolSize <= 0 || newPoolSize == poolSize) {
            return; // unchanged or invalid: nothing to do
        }
        this.poolSize = newPoolSize;
        if (executor instanceof ThreadPoolExecutor pool) {
            if (newPoolSize > pool.getMaximumPoolSize()) {
                // growing: raise max first so core <= max always holds
                pool.setMaximumPoolSize(newPoolSize);
                pool.setCorePoolSize(newPoolSize);
            } else {
                // shrinking (or equal max): lower core first so max >= core holds
                pool.setCorePoolSize(newPoolSize);
                pool.setMaximumPoolSize(newPoolSize);
            }
        }
    }

    /**
     * Refreshes per-run tunables (batch mode and batch size) from the
     * pipeline configuration.
     */
    protected void adjustConfig(DataLoadContext context) {
        var parameters = context.getPipeline().getParameters();
        this.useBatch = parameters.isUseBatch();
        // Older manager versions may serialize configs without this field,
        // so only apply it when explicitly configured with a sane value.
        Integer configuredBatchSize = parameters.getLoadBatchsize();
        if (configuredBatchSize != null && configuredBatchSize > 0) {
            this.batchSize = configuredBatchSize;
        }
    }


    /**
     * Executes a batch of DDL statements one by one. In dry-run mode the
     * statements are only recorded as processed, never executed. When the
     * pipeline allows skipping DDL errors, a failing statement is logged and
     * skipped; otherwise the load aborts.
     */
    protected void doDdl(DataLoadContext context, List<EventData> dataCollection) {
        // support ddl dryRun
        if (context.getPipeline().getParameters().isDryRun()) {
            context.getProcessedDatas().addAll(dataCollection);
            return;
        }

        // Loop-invariant, hoisted out of the loop. Boolean.TRUE.equals also
        // guards against null: older manager versions serialize configs
        // without this flag (the raw unboxing previously NPE'd in that case).
        boolean skipDdlException = Boolean.TRUE.equals(context.getPipeline().getParameters().getSkipDdlException());
        for (final EventData data : dataCollection) {
            try {
                if (doLoadDDL(context, data)) {
                    context.getProcessedDatas().add(data); // executed successfully
                } else {
                    context.getFailedDatas().add(data);
                }
            } catch (Throwable e) {
                if (skipDdlException) {
                    // do skip
                    logger.warn("skip exception for ddl : {} , caused by {}", data, ExceptionUtils.getFullStackTrace(e));
                } else {
                    throw new LoadException(e);
                }
            }
        }
    }

    /**
     * Executes a single DDL statement. Returns {@code true} on success,
     * {@code false} when the statement should be recorded as failed; may
     * throw on execution errors.
     */
    protected abstract boolean doLoadDDL(DataLoadContext context, EventData data) throws Exception;

    /**
     * Returns {@code true} when the batch consists of DDL statements.
     * DDL and DML must never share a batch (canal guarantees this upstream),
     * so any mixed batch — in either order — is rejected loudly. The previous
     * check only caught ddl-followed-by-dml; a dml-then-ddl batch silently
     * passed and was routed down the DDL path.
     */
    protected boolean isDdlDatas(List<EventData> eventDatas) {
        boolean hasDdl = false;
        boolean hasDml = false;
        for (EventData eventData : eventDatas) {
            if (eventData.getEventType().isDdl()) {
                hasDdl = true;
            } else {
                hasDml = true;
            }
            if (hasDdl && hasDml) {
                throw new LoadException("ddl/dml can't be in one batch, it's may be a bug , pls submit issues.",
                        DbLoadDumper.dumpEventDatas(eventDatas));
            }
        }

        return hasDdl;
    }


    /**
     * Groups records into weight buckets according to the push weight of each
     * record's data-media pair mapping, producing the ordered weights the
     * loader will process.
     */
    protected WeightBuckets<EventData> buildWeightBuckets(DataLoadContext context, List<EventData> dataCollection) {
        WeightBuckets<EventData> buckets = new WeightBuckets<>();
        Pipeline pipeline = context.getPipeline();
        for (EventData data : dataCollection) {
            // resolve the configured push weight for this record's pair
            DataMediaPair pair = ConfigHelper.findDataMediaPair(pipeline, data.getPairId());
            buckets.addItem(pair.getPushWeight(), data);
        }
        return buckets;
    }

    /**
     * Accumulates per-pair load statistics (operation counters, row count and
     * estimated row size) for one successfully processed record.
     */
    protected void processStat(EventData data, DataLoadContext context) {
        LoadStatsTracker.LoadCounter counter =
                loadStatsTracker.getStat(context.getIdentity()).getStat(data.getPairId());

        EventType eventType = data.getEventType();
        if (eventType.isInsert()) {
            counter.getInsertCount().incrementAndGet();
        } else if (eventType.isUpdate()) {
            counter.getUpdateCount().incrementAndGet();
        } else if (eventType.isDelete()) {
            counter.getDeleteCount().incrementAndGet();
        }

        counter.getRowCount().incrementAndGet();
        counter.getRowSize().addAndGet(calculateSize(data));
    }


    /**
     * Rough per-row size estimate for the statistics. Uses the size already
     * captured on the event rather than re-serializing it, keeping the stats
     * path cheap.
     */
    private long calculateSize(EventData data) {
        return data.getSize();
    }


    // Outcome of one load attempt inside AbstractWorker's retry loop:
    // SUCCESS = batch done; RETRY = transient failure (lock contention), try
    // again; ERROR = non-retryable failure, abort the worker.
    enum ExecuteResult {
        SUCCESS, ERROR, RETRY
    }

    protected abstract class AbstractWorker implements Callable<Exception> {

        protected final DataLoadContext context;
        protected final List<EventData> dataList;
        protected final boolean canBatch;
        protected final List<EventData> allFailedDataList = new ArrayList<>();
        protected final List<EventData> allProcessedDataList = new ArrayList<>();
        protected final List<EventData> processedDataList = new ArrayList<>();
        protected final List<EventData> failedDataList = new ArrayList<>();

        public AbstractWorker(DataLoadContext context, List<EventData> eventDataList, boolean canBatch) {
            this.context = context;
            this.dataList = eventDataList;
            this.canBatch = canBatch;
            // eventData为同一数据库的记录，只取第一条即可

        }

        public Exception call() throws Exception {
            try {
                Thread.currentThread().setName(getThreadName());
                return doCall();
            } finally {
                Thread.currentThread().setName(workName);
            }
        }

        protected abstract String getThreadName();


        private Exception doCall() {
            RuntimeException error;
            ExecuteResult exeResult;
            int index = 0;// 记录下处理成功的记录下标
            while (index < dataList.size()) {
                EventData eventData = null;
                // 处理数据切分
                final List<EventData> splitDataList = new ArrayList<>();
                if (useBatch && canBatch) {
                    int end = Math.min(index + batchSize, dataList.size());
                    splitDataList.addAll(dataList.subList(index, end));
                    index = end;// 移动到下一批次
                } else {
                    eventData = dataList.get(index);
                    index = index + 1;// 移动到下一条
                }

                int retryCount = 0;
                while (true) {
                    try {
                        if (!CollectionUtils.isEmpty(failedDataList)) {
                            splitDataList.clear();
                            splitDataList.addAll(failedDataList); // 下次重试时，只处理错误的记录
                        } else {
                            failedDataList.addAll(splitDataList); // 先添加为出错记录，可能获取lob,datasource会出错
                        }

                        if (useBatch && canBatch) {
                            doBatchLoad(splitDataList);
                        } else {
                            doLoad(eventData);
                        }

                        error = null;
                        exeResult = ExecuteResult.SUCCESS;
                    } catch (PessimisticLockingFailureException ex) {
                        error = new LoadException(ExceptionUtils.getFullStackTrace(ex),
                                DbLoadDumper.dumpEventDatas(splitDataList));
                        exeResult = ExecuteResult.RETRY;
                    } catch (Throwable ex) {
                        error = new LoadException(ExceptionUtils.getFullStackTrace(ex),
                                DbLoadDumper.dumpEventDatas(splitDataList));
                        // if (StringUtils.contains(ex.getMessage(),
                        // "ORA-00001")) {
                        // exeResult = ExecuteResult.RETRY;
                        // } else {
                        // exeResult = ExecuteResult.ERROR;
                        // }
                        exeResult = ExecuteResult.ERROR;
                    }

                    if (ExecuteResult.SUCCESS == exeResult) {
                        allFailedDataList.addAll(failedDataList);// 记录一下异常到all记录中
                        allProcessedDataList.addAll(processedDataList);
                        failedDataList.clear();// 清空上一轮的处理
                        processedDataList.clear();
                        break; // do next eventData
                    } else if (ExecuteResult.RETRY == exeResult) {
                        retryCount = retryCount + 1;// 计数一次
                        // 出现异常，理论上当前的批次都会失败
                        processedDataList.clear();
                        failedDataList.clear();
                        failedDataList.addAll(splitDataList);
                        if (retryCount >= retry) {
                            processFailedDataList(index);// 重试已结束，添加出错记录并退出
                            throw new LoadException(String.format("execute [%s] retry %s times failed",
                                    context.getIdentity().toString(),
                                    retryCount), error);
                        } else {
                            try {
                                int wait = retryCount * retryWait;
                                wait = Math.max(wait, retryWait);
                                Thread.sleep(wait);
                            } catch (InterruptedException ex) {
                                Thread.interrupted();
                                processFailedDataList(index);// 局部处理出错了
                                throw new LoadException(ex);
                            }
                        }
                    } else {
                        // 出现异常，理论上当前的批次都会失败
                        processedDataList.clear();
                        failedDataList.clear();
                        failedDataList.addAll(splitDataList);
                        processFailedDataList(index);// 局部处理出错了
                        throw error;
                    }
                }
            }

            // 记录一下当前处理过程中失败的记录,affect = 0的记录
            context.getFailedDatas().addAll(allFailedDataList);
            context.getProcessedDatas().addAll(allProcessedDataList);
            return null;
        }

        protected abstract void doLoad(EventData eventData);

        protected abstract void doBatchLoad(List<EventData> splitDataList);


        protected void processStat(EventData data, int affect, boolean batch) {
            if (batch && (affect < 1 && affect != Statement.SUCCESS_NO_INFO)) {
                failedDataList.add(data); // 记录到错误的临时队列，进行重试处理
            } else if (!batch && affect < 1) {
                failedDataList.add(data);// 记录到错误的临时队列，进行重试处理
            } else {
                processedDataList.add(data); // 记录到成功的临时队列，commit也可能会失败。所以这记录也可能需要进行重试
                AbstractLoadAction.this.processStat(data, context);
            }
        }

        // 出现异常回滚了，记录一下异常记录
        protected void processFailedDataList(int index) {
            allFailedDataList.addAll(failedDataList);// 添加失败记录
            context.getFailedDatas().addAll(allFailedDataList);// 添加历史出错记录
            for (; index < dataList.size(); index++) { // 记录一下未处理的数据
                context.getFailedDatas().add(dataList.get(index));
            }
            // 这里不需要添加当前成功记录，出现异常后会rollback所有的成功记录，比如processDatas有记录，但在commit出现失败
            // (bugfix)
            allProcessedDataList.addAll(processedDataList);
            context.getProcessedDatas().addAll(allProcessedDataList);// 添加历史成功记录
        }
    }





    // =============== setter / getter ===============

    // Plain property setters (presumably injected via Spring XML wiring —
    // TODO confirm against the bean definitions).

    public void setPoolSize(int poolSize) {
        this.poolSize = poolSize;
    }


    public void setInterceptor(LoadInterceptor<LoadContext, ObjectData> interceptor) {
        this.interceptor = interceptor;
    }


    public void setConfigClientService(ConfigClientService configClientService) {
        this.configClientService = configClientService;
    }

    public void setLoadStatsTracker(LoadStatsTracker loadStatsTracker) {
        this.loadStatsTracker = loadStatsTracker;
    }

    public void setUseBatch(boolean useBatch) {
        this.useBatch = useBatch;
    }

    public void setCanalConfigClient(CanalConfigClient canalConfigClient) {
        this.canalConfigClient = canalConfigClient;
    }

    public void setRetry(int retry) {
        this.retry = retry;
    }

    public void setRetryWait(int retryWait) {
        this.retryWait = retryWait;
    }


    /**
     * Spring lifecycle hook: builds the fixed-size worker pool. The bounded
     * queue plus CallerRunsPolicy means that when the queue fills up the
     * submitting thread runs the task itself, providing natural back-pressure
     * instead of dropping work.
     */
    @Override
    public void afterPropertiesSet() throws Exception {
        int queueCapacity = poolSize * 4;
        executor = new ThreadPoolExecutor(poolSize,
                poolSize,
                0L,
                TimeUnit.MILLISECONDS,
                new ArrayBlockingQueue<Runnable>(queueCapacity),
                new NamedThreadFactory(workName),
                new ThreadPoolExecutor.CallerRunsPolicy());
    }

    @Override
    public void destroy() throws Exception {
        // Spring lifecycle hook: interrupt in-flight workers and discard
        // queued tasks on shutdown (best-effort, no awaitTermination).
        executor.shutdownNow();
    }
}
