package beautiful.butterfly.drds.data_exchange.job;


import beautiful.butterfly.drds.data_exchange.Container;
import beautiful.butterfly.drds.data_exchange.communicator.AbstractContainerCommunicator;
import beautiful.butterfly.drds.data_exchange.communicator.job.JobContainerCommunicator;
import beautiful.butterfly.drds.data_exchange.constant.Constants;
import beautiful.butterfly.drds.data_exchange.error_code.DataExchangeException;
import beautiful.butterfly.drds.data_exchange.error_code.FrameworkErrorCode;
import beautiful.butterfly.drds.data_exchange.plugin.AbstractJobPlugin;
import beautiful.butterfly.drds.data_exchange.plugin_collector.DefaultJobPluginMessageCollector;
import beautiful.butterfly.drds.data_exchange.plugin_collector.JobPluginMessageCollector;
import beautiful.butterfly.drds.data_exchange.read_and_write_data.AbstractReader;
import beautiful.butterfly.drds.data_exchange.read_and_write_data.AbstractWriter;
import beautiful.butterfly.drds.data_exchange.read_and_write_data.Reader;
import beautiful.butterfly.drds.data_exchange.read_and_write_data.Writer;
import beautiful.butterfly.drds.data_exchange.report.Message;
import beautiful.butterfly.drds.data_exchange.report.Messages;
import beautiful.butterfly.drds.data_exchange.statistics.PerfTrace;
import beautiful.butterfly.drds.data_exchange.util.Configuration;
import beautiful.butterfly.drds.data_exchange.util.ErrorRecordChecker;
import beautiful.butterfly.drds.data_exchange.util.Strings;
import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.Validate;

import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.List;


/**
 * job实例运行在jobContainer容器中，它是所有任务的master，负责初始化、拆分、调度、运行、回收、监控和汇报
 * 但它并不做实际的数据同步操作
 */
@Slf4j
public class JobContainer extends Container
{


    // Formatter for the start/end times printed by logStatistics().
    // NOTE(review): SimpleDateFormat is not thread-safe; sharing it as a
    // static field is only safe if logStatistics() is never invoked
    // concurrently — confirm, or migrate to java.time.DateTimeFormatter.
    private static final SimpleDateFormat dateFormat = new SimpleDateFormat(
            "yyyy-MM-dd HH:mm:ss");

    // Id of the job this container runs; resolved from configuration in
    // init()/preCheckInit() (defaults to 0 when absent).
    private long jobId;


    /**
     * Job-level reader and writer plugin instances owned by this container.
     */
    private AbstractReader.AbstractJob abstractJobReader;

    private AbstractWriter.AbstractJob abstractJobWriter;


    // Wall-clock timestamps (ms) delimiting the whole job run...
    private long startTimeStamp;

    private long endTimeStamp;

    // ...and the data-transfer (schedule) phase only.
    private long startTransferTimeStamp;

    private long endTransferTimeStamp;

    // Channel count derived from the configured speed limits, see
    // adjustChannelNumber().
    private int needChannelNumber;

    // Number of task configurations produced by split(); used when building
    // the progress/failure reports.
    private int totalStage = 1;

    // Validates the error-record thresholds once the job has run.
    private ErrorRecordChecker errorLimit;

    /**
     * Creates a job container backed by the given job configuration.
     */
    public JobContainer(Configuration configuration)
    {
        super(configuration);

        errorLimit = new ErrorRecordChecker(configuration);
    }

    /**
     * Entry point of the job lifecycle. Runs, in order: preHandle, init,
     * prepare, split, schedule, post, postHandle — or only preCheck for a
     * dry run. Any failure is reported through the container communicator
     * and rethrown as a framework RUNTIME_ERROR.
     */
    @Override
    public void run()
    {
        log.info("DataX jobContainer starts job.");

        boolean hasException = false;
        boolean isDryRun = false;
        try
        {
            this.startTimeStamp = System.currentTimeMillis();
            isDryRun = configuration.getBoolean(Constants.job_setting_dryrun, false);
            if (isDryRun)
            {
                log.info("jobContainer preCheck ...");
                this.preCheck();
            } else
            {
                log.debug("jobContainer preHandle...");
                this.preHandle();
                log.debug("jobContainer init...");
                this.init();
                log.info("jobContainer prepare...");
                this.prepare();
                log.info("jobContainer split...");
                this.totalStage = this.split();
                log.info("jobContainer schedule...");
                this.schedule();
                log.debug("jobContainer post...");
                this.post();
                log.debug("jobContainer postHandle...");
                this.postHandle();
                log.info("jobId [{}] completed successfully.", this.jobId);
            }
        } catch (Throwable e)
        {
            log.error("Exception when job run", e);

            hasException = true;

            // BUGFIX: the finally block only assigns endTimeStamp AFTER this
            // catch runs, so the failure report below used to carry a zero
            // timestamp. Capture the failure time here (finally will simply
            // overwrite it with a slightly later value).
            this.endTimeStamp = System.currentTimeMillis();

            if (e instanceof OutOfMemoryError)
            {
                // Release plugin resources before hinting the VM to reclaim
                // memory, so the report below has a chance to succeed.
                this.destroy();
                System.gc();
            }


            if (super.getContainerCommunicator() == null)
            {
                // The containerCollector is normally initialized in schedule();
                // when the failure happens before schedule() it must be created
                // here so the failure can still be reported.

                AbstractContainerCommunicator tempContainerCollector;
                // standalone
                tempContainerCollector = new JobContainerCommunicator(configuration);

                super.setContainerCommunicator(tempContainerCollector);
            }

            Message message = super.getContainerCommunicator().collect();
            // The pre-report state does not need to be set manually.
            // message.setState(State.FAILED);
            message.setThrowable(e);
            message.setTimestamp(this.endTimeStamp);

            Message tempComm = new Message();
            tempComm.setTimestamp(this.startTransferTimeStamp);

            Message reportMessage = Messages.getReportCommunication(message, tempComm, this.totalStage);
            super.getContainerCommunicator().report(reportMessage);

            throw DataExchangeException.asDataExchangeException(
                    FrameworkErrorCode.RUNTIME_ERROR, e);
        } finally
        {
            if (!isDryRun)
            {
                this.destroy();
                this.endTimeStamp = System.currentTimeMillis();
                if (!hasException)
                {
                    log.info(PerfTrace.getInstance().summarizeNoException());
                    this.logStatistics();
                }
            }
        }
    }

    /**
     * Dry-run path: builds the reader/writer job plugins, derives the channel
     * number and asks both plugins to validate their configuration without
     * transferring any data.
     */
    private void preCheck() throws SQLException
    {
        this.preCheckInit();
        this.adjustChannelNumber();

        // Guarantee at least one channel even if no limit produced a count.
        if (this.needChannelNumber <= 0)
        {
            this.needChannelNumber = 1;
        }
        this.preCheckReader();
        this.preCheckWriter();
        log.info("PreCheck通过");
    }

    /**
     * Resolves the jobId (defaulting to 0 when absent), renames the current
     * thread accordingly and creates the dry-run reader and writer job
     * plugins.
     */
    private void preCheckInit()
    {
        this.jobId = this.configuration.getLong(
                Constants.container_job_id, -1);

        if (this.jobId < 0)
        {
            log.info("Set jobId = 0");
            this.jobId = 0;
            // Write the default back so downstream components see it too.
            this.configuration.set(Constants.container_job_id,
                    this.jobId);
        }

        Thread.currentThread().setName("job-" + this.jobId);

        JobPluginMessageCollector jobPluginMessageCollector = new DefaultJobPluginMessageCollector(
                this.getContainerCommunicator());
        this.abstractJobReader = this.preCheckReaderInit(jobPluginMessageCollector);
        this.abstractJobWriter = this.preCheckWriterInit(jobPluginMessageCollector);
    }

    /**
     * Builds the reader job plugin used by the dry-run pre-check. The
     * reader's parameter block is flagged with dryRun=true so the plugin only
     * validates its configuration.
     */
    private AbstractReader.AbstractJob preCheckReaderInit(JobPluginMessageCollector jobPluginMessageCollector)
    {
        AbstractReader.AbstractJob abstractJobReader = new Reader.Job();//(AbstractReader.Job) Loaders.loadJobPlugin( TaskType.reade, this.readerPluginName);

        this.configuration.set(Constants.job_content_reader_parameter + ".dryRun", true);

        // The reader's own job configuration.
        abstractJobReader.setJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_reader_parameter));
        // BUGFIX: the peer configuration is the WRITER's parameter block,
        // mirroring initReaderJob() and preCheckWriterInit(); the original
        // passed the reader's own block here, making the peer config useless.
        abstractJobReader.setPeerJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_writer_parameter));

        abstractJobReader.setJobPluginMessageCollector(jobPluginMessageCollector);

        return abstractJobReader;
    }


    /**
     * Builds the writer job plugin used by the dry-run pre-check. The
     * writer's parameter block is flagged with dryRun=true; the writer
     * receives its own parameters as job configuration and the reader's
     * parameters as peer configuration.
     */
    private AbstractWriter.AbstractJob preCheckWriterInit(JobPluginMessageCollector jobPluginMessageCollector)
    {
        AbstractWriter.AbstractJob writerJob = new Writer.Job();// (AbstractWriter.Job) Loaders.loadJobPlugin( TaskType.write, this.writerPluginName);

        // Mark the writer parameters so the plugin knows this is a dry run.
        this.configuration.set(Constants.job_content_writer_parameter + ".dryRun", true);

        // Own side first, then the peer (reader) side, then the collector.
        writerJob.setJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_writer_parameter));
        writerJob.setPeerJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_reader_parameter));
        writerJob.setJobPluginMessageCollector(jobPluginMessageCollector);

        return writerJob;
    }

    /**
     * Delegates the dry-run validation to the reader job plugin.
     */
    private void preCheckReader() throws SQLException
    {
        this.abstractJobReader.preCheck();
    }

    /**
     * Delegates the dry-run validation to the writer job plugin.
     */
    private void preCheckWriter() throws SQLException
    {
        this.abstractJobWriter.preCheck();
    }

    /**
     * Initializes the reader and writer job plugins for a real run. The jobId
     * is resolved first (defaulting to 0 when absent).
     */
    private void init() throws SQLException
    {
        this.jobId = this.configuration.getLong(Constants.container_job_id, -1);

        if (this.jobId < 0)
        {
            log.info("Set jobId = 0");
            this.jobId = 0;
            this.configuration.set(Constants.container_job_id,
                    this.jobId);
        }

        JobPluginMessageCollector jobPluginMessageCollector = new DefaultJobPluginMessageCollector(this.getContainerCommunicator());
        // The reader MUST be initialized before the writer.
        this.abstractJobReader = this.initReaderJob(jobPluginMessageCollector);
        this.abstractJobWriter = this.initWriterJob(jobPluginMessageCollector);
    }

    /**
     * Runs both plugins' prepare phase, reader first.
     */
    private void prepare() throws SQLException
    {
        this.prepareJobReader();
        this.prepareJobWriter();
    }

    /**
     * Hook executed before init(). Handler-plugin loading is not implemented
     * in this codebase (see the commented-out Loaders calls elsewhere in this
     * class), so this is currently a no-op.
     */
    private void preHandle() throws SQLException
    {
        // Collector that would let the handler plugin report back to the job.
        JobPluginMessageCollector jobPluginMessageCollector = new DefaultJobPluginMessageCollector(
                this.getContainerCommunicator());

        // FIXME: load the real pre-handler plugin once plugin loading exists.
        // BUGFIX: the original dereferenced this null reference directly,
        // which made every non-dry run fail with a NullPointerException.
        AbstractJobPlugin abstractJobPlugin = null;
        if (abstractJobPlugin == null)
        {
            log.debug("No pre-handler plugin configured, skipping preHandle.");
            return;
        }

        abstractJobPlugin.setJobPluginMessageCollector(jobPluginMessageCollector);
        abstractJobPlugin.preHandler(configuration);
    }

    /**
     * Hook executed after post(). Handler-plugin loading is not implemented
     * in this codebase, so this is currently a no-op.
     */
    private void postHandle() throws SQLException
    {
        JobPluginMessageCollector jobPluginMessageCollector = new DefaultJobPluginMessageCollector(this.getContainerCommunicator());

        // FIXME: load the real post-handler plugin once plugin loading exists.
        // BUGFIX: the original dereferenced this null reference directly,
        // which made every non-dry run fail with a NullPointerException.
        AbstractJobPlugin handler = null;
        if (handler == null)
        {
            log.debug("No post-handler plugin configured, skipping postHandle.");
            return;
        }

        handler.setJobPluginMessageCollector(jobPluginMessageCollector);
        handler.postHandler(configuration);
    }


    /**
     * Performs the finest-grained split of reader and writer. The writer's
     * split must mirror the reader's so the task counts match and the 1:1
     * channel model holds; the per-task reader and writer configurations are
     * then merged into the job's task list.
     *
     * @return the number of tasks produced
     */
    private int split() throws SQLException
    {
        this.adjustChannelNumber();

        // Guarantee at least one channel.
        if (this.needChannelNumber <= 0)
        {
            this.needChannelNumber = 1;
        }

        List<Configuration> readerTaskConfigs = this
                .doReaderSplit(this.needChannelNumber);
        int taskNumber = readerTaskConfigs.size();
        // The writer is told how many slices the reader produced so the
        // counts come out equal.
        List<Configuration> writerTaskConfigs = this
                .doWriterSplit(taskNumber);


        // Input: the reader/writer parameter lists; output: the list of
        // per-task content elements.
        List<Configuration> contentConfig = mergeReaderAndWriterTaskConfigs(
                readerTaskConfigs, writerTaskConfigs);


        log.debug("contentConfig configuration: " + JSON.toJSONString(contentConfig));

        this.configuration.set(Constants.task_list, contentConfig);

        return contentConfig.size();
    }

    /**
     * Derives needChannelNumber from the configured speed limits. The byte
     * and record limits each yield a channel count (global limit divided by
     * per-channel limit); the stricter (smaller) one wins. Only when neither
     * is set does the explicit channel setting apply.
     *
     * @throws beautiful.butterfly.drds.data_exchange.error_code.DataExchangeException
     *         CONFIG_ERROR when a global limit is set without a positive
     *         per-channel limit, or when no speed setting exists at all
     */
    private void adjustChannelNumber()
    {
        int needChannelNumberByByte = Integer.MAX_VALUE;
        int needChannelNumberByRecord = Integer.MAX_VALUE;

        boolean isByteLimit = (this.configuration.getInt(
                Constants.job_setting_speed_byte, 0) > 0);
        if (isByteLimit)
        {
            long globalLimitedByteSpeed = this.configuration.getInt(
                    Constants.job_setting_speed_byte, 10 * 1024 * 1024);

            // With a global bps limit the per-channel bps limit is mandatory,
            // otherwise the division below cannot be performed.
            Long channelLimitedByteSpeed = this.configuration
                    .getLong(Constants.transport_channel_speed_byte);
            if (channelLimitedByteSpeed == null || channelLimitedByteSpeed <= 0)
            {
                // BUGFIX: the original built this exception without throwing
                // it, which fell through to a NullPointerException at the
                // division below.
                throw DataExchangeException.asDataExchangeException(
                        FrameworkErrorCode.CONFIG_ERROR,
                        "在有总bps限速条件下，单个channel的bps值不能为空，也不能为非正数");
            }

            needChannelNumberByByte =
                    (int) (globalLimitedByteSpeed / channelLimitedByteSpeed);
            needChannelNumberByByte =
                    needChannelNumberByByte > 0 ? needChannelNumberByByte : 1;
            log.info("job set Max-Byte-Speed to " + globalLimitedByteSpeed + " bytes.");
        }

        boolean isRecordLimit = (this.configuration.getInt(
                Constants.job_setting_speed_record, 0)) > 0;
        if (isRecordLimit)
        {
            long globalLimitedRecordSpeed = this.configuration.getInt(
                    Constants.job_setting_speed_record, 100000);

            Long channelLimitedRecordSpeed = this.configuration.getLong(
                    Constants.transport_channel_speed_record);
            if (channelLimitedRecordSpeed == null || channelLimitedRecordSpeed <= 0)
            {
                // BUGFIX: same as above — the exception was created but never
                // thrown.
                throw DataExchangeException.asDataExchangeException(FrameworkErrorCode.CONFIG_ERROR,
                        "在有总tps限速条件下，单个channel的tps值不能为空，也不能为非正数");
            }

            needChannelNumberByRecord =
                    (int) (globalLimitedRecordSpeed / channelLimitedRecordSpeed);
            needChannelNumberByRecord =
                    needChannelNumberByRecord > 0 ? needChannelNumberByRecord : 1;
            log.info("job set Max-Record-Speed to " + globalLimitedRecordSpeed + " records.");
        }

        // Use the stricter of the two derived channel counts.
        this.needChannelNumber = Math.min(needChannelNumberByByte, needChannelNumberByRecord);

        // If either the byte or record limit fixed needChannelNumber, stop.
        if (this.needChannelNumber < Integer.MAX_VALUE)
        {
            return;
        }

        boolean isChannelLimit = (this.configuration.getInt(
                Constants.job_setting_speed_channel, 0) > 0);
        if (isChannelLimit)
        {
            this.needChannelNumber = this.configuration.getInt(
                    Constants.job_setting_speed_channel);

            log.info("job set RecordQueue-Number to " + this.needChannelNumber
                    + " channels.");

            return;
        }

        throw DataExchangeException.asDataExchangeException(
                FrameworkErrorCode.CONFIG_ERROR,
                "Job运行速度必须设置");
    }

    /**
     * Integrates the split results into task groups and runs them through the
     * scheduler. Also installs the container communicator (via initScheduler)
     * and records the transfer start/end timestamps; finally verifies the
     * error-record limits.
     */
    private void schedule()
    {
        // The global speed and the per-channel speed are configured in B/s.

        int taskNumber = this.configuration.getList(Constants.task_list).size();
        int channelsPerTaskGroup = this.configuration.getInt(Constants.container_taskgroup_channel, 5);
        // Never use more channels than there are tasks.
        this.needChannelNumber = Math.min(this.needChannelNumber, taskNumber);
        PerfTrace.getInstance().setChannelNumber(needChannelNumber);

        // Decide, from the configuration, which tasks each taskGroup runs.

        List<Configuration> taskGroupConfigs = TaskAssigns.assignFairly(this.configuration, this.needChannelNumber, channelsPerTaskGroup);

        log.info("Scheduler starts [{}] taskGroups.", taskGroupConfigs.size());


        Scheduler scheduler;
        try
        {

            scheduler = initScheduler(this.configuration);


            if (this.jobId <= 0)
            {
                throw DataExchangeException.asDataExchangeException(FrameworkErrorCode.RUNTIME_ERROR,
                        "在[ local | distribute ]模式下必须设置jobId，并且其值 > 0 .");
            }


            this.startTransferTimeStamp = System.currentTimeMillis();

            scheduler.schedule(taskGroupConfigs);

            this.endTransferTimeStamp = System.currentTimeMillis();
        } catch (Exception e)
        {
            // Record the end of the transfer phase even on failure so the
            // statistics stay meaningful.
            this.endTransferTimeStamp = System.currentTimeMillis();
            throw DataExchangeException.asDataExchangeException(
                    FrameworkErrorCode.RUNTIME_ERROR, e);
        }

        // Verify that the error-record thresholds were not exceeded.
        this.checkLimit();
    }


    /**
     * Creates the scheduler and installs the container communicator it will
     * report through.
     */
    private Scheduler initScheduler(Configuration configuration)
    {
        AbstractContainerCommunicator containerCommunicator = new JobContainerCommunicator(configuration);
        super.setContainerCommunicator(containerCommunicator);
        return new Scheduler(containerCommunicator);
    }

    /**
     * Runs both plugins' post phase.
     * NOTE(review): the writer is posted before the reader here — the reverse
     * of every other phase in this class; confirm this ordering is intended.
     */
    private void post() throws SQLException
    {
        this.postJobWriter();
        this.postJobReader();
    }

    /**
     * Releases the writer job plugin first, then the reader, nulling out each
     * reference once released. Safe to call repeatedly.
     */
    private void destroy()
    {
        AbstractWriter.AbstractJob writerJob = this.abstractJobWriter;
        if (writerJob != null)
        {
            writerJob.destroy();
            this.abstractJobWriter = null;
        }

        AbstractReader.AbstractJob readerJob = this.abstractJobReader;
        if (readerJob != null)
        {
            readerJob.destroy();
            this.abstractJobReader = null;
        }
    }

    /**
     * Collects the final metrics, reports them through the container
     * communicator and logs a human-readable summary: elapsed time,
     * throughput, record counts and — when any are present — transformer
     * record counts. No-op when no communicator was ever installed.
     */
    private void logStatistics()
    {
        long totalCosts = (this.endTimeStamp - this.startTimeStamp) / 1000;
        long transferCosts = (this.endTransferTimeStamp - this.startTransferTimeStamp) / 1000;
        if (0L == transferCosts)
        {
            // Avoid division by zero for sub-second transfers.
            transferCosts = 1L;
        }

        if (super.getContainerCommunicator() == null)
        {
            return;
        }

        Message message = super.getContainerCommunicator().collect();
        message.setTimestamp(this.endTimeStamp);

        Message tempComm = new Message();
        tempComm.setTimestamp(this.startTransferTimeStamp);

        Message reportMessage = Messages.getReportCommunication(message, tempComm, this.totalStage);

        // Byte throughput per second over the transfer phase.
        long byteSpeedPerSecond = message.getLong(Messages.read_succeed_bytes)
                / transferCosts;

        // Record throughput per second over the transfer phase.
        long recordSpeedPerSecond = message.getLong(Messages.read_succeed_records)
                / transferCosts;

        reportMessage.setLong(Messages.byte_speed, byteSpeedPerSecond);
        reportMessage.setLong(Messages.record_speed, recordSpeedPerSecond);

        super.getContainerCommunicator().report(reportMessage);

        // The Chinese labels below are user-facing runtime strings.
        log.info(String.format(
                "\n" + "%-26s: %-18s\n" + "%-26s: %-18s\n" + "%-26s: %19s\n"
                        + "%-26s: %19s\n" + "%-26s: %19s\n" + "%-26s: %19s\n"
                        + "%-26s: %19s\n",
                "任务启动时刻",
                dateFormat.format(startTimeStamp),

                "任务结束时刻",
                dateFormat.format(endTimeStamp),

                "任务总计耗时",
                String.valueOf(totalCosts) + "s",
                "任务平均流量",
                Strings.stringify(byteSpeedPerSecond)
                        + "/s",
                "记录写入速度",
                String.valueOf(recordSpeedPerSecond)
                        + "rec/s", "读出记录总数",
                String.valueOf(Messages.getTotalReadRecords(message)),
                "读写失败总数",
                String.valueOf(Messages.getTotalErrorRecords(message))
        ));

        if (message.getLong(Messages.transformer_succeed_records) > 0
                || message.getLong(Messages.transformer_failed_records) > 0
                || message.getLong(Messages.transformer_filter_records) > 0)
        {
            log.info(String.format(
                    "\n" + "%-26s: %19s\n" + "%-26s: %19s\n" + "%-26s: %19s\n",
                    "Transformer成功记录总数",
                    message.getLong(Messages.transformer_succeed_records),

                    "Transformer失败记录总数",
                    message.getLong(Messages.transformer_failed_records),

                    "Transformer过滤记录总数",
                    message.getLong(Messages.transformer_filter_records)
            ));
        }


    }

    /**
     * Creates and initializes the reader job plugin: its own parameter block
     * as job configuration, the writer's parameter block as peer
     * configuration, plus the message collector.
     *
     * @return the initialized Reader.Job
     */
    private AbstractReader.AbstractJob initReaderJob(
            JobPluginMessageCollector jobPluginMessageCollector) throws SQLException
    {


        AbstractReader.AbstractJob abstractJobReader = new Reader.Job();

        // The reader's own job configuration.
        abstractJobReader.setJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_reader_parameter));

        // The peer side is the WRITER's parameter block (the original comment
        // here wrongly said "reader").
        abstractJobReader.setPeerJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_writer_parameter));

        abstractJobReader.setJobPluginMessageCollector(jobPluginMessageCollector);
        abstractJobReader.init();


        return abstractJobReader;
    }

    /**
     * Creates and initializes the writer job plugin: its own parameter block
     * as job configuration, the reader's parameter block as peer
     * configuration, plus the message collector.
     *
     * @return the initialized Writer.Job
     */
    private AbstractWriter.AbstractJob initWriterJob(
            JobPluginMessageCollector jobPluginMessageCollector) throws SQLException
    {


        AbstractWriter.AbstractJob abstractJobWriter = new Writer.Job();

        // The writer's own job configuration.
        abstractJobWriter.setJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_writer_parameter));

        // The peer side is the READER's parameter block.
        abstractJobWriter.setPeerJobConfiguration(this.configuration.getConfigureDataMap(
                Constants.job_content_reader_parameter));


        abstractJobWriter.setJobPluginMessageCollector(jobPluginMessageCollector);
        abstractJobWriter.init();


        return abstractJobWriter;
    }

    /**
     * Runs the reader plugin's prepare phase.
     */
    private void prepareJobReader() throws SQLException
    {
        this.abstractJobReader.prepare();
    }

    /**
     * Runs the writer plugin's prepare phase.
     */
    private void prepareJobWriter() throws SQLException
    {
        this.abstractJobWriter.prepare();
    }

    // TODO: handle the case where the source is empty to begin with
    /**
     * Asks the reader plugin to split itself into roughly adviceNumber tasks.
     *
     * @param adviceNumber suggested slice count (the derived channel number)
     * @return the non-empty list of per-task reader configurations
     */
    private List<Configuration> doReaderSplit(int adviceNumber) throws SQLException
    {
        List<Configuration> readerSlices = this.abstractJobReader.split(adviceNumber);

        if (readerSlices == null || readerSlices.isEmpty())
        {
            throw DataExchangeException.asDataExchangeException(
                    FrameworkErrorCode.PLUGIN_SPLIT_ERROR,
                    "reader切分的task数目不能小于等于0");
        }
        return readerSlices;
    }

    /**
     * Asks the writer plugin to split itself into exactly as many tasks as
     * the reader produced, preserving the 1:1 channel model.
     *
     * @param readerTaskNumber the reader's slice count
     * @return the non-empty list of per-task writer configurations
     */
    private List<Configuration> doWriterSplit(int readerTaskNumber)
    {
        List<Configuration> writerSlices = this.abstractJobWriter
                .split(readerTaskNumber);

        if (writerSlices == null || writerSlices.isEmpty())
        {
            throw DataExchangeException.asDataExchangeException(
                    FrameworkErrorCode.PLUGIN_SPLIT_ERROR,
                    "writer切分的task不能小于等于0");
        }
        return writerSlices;
    }

    /**
     * Pairs reader and writer slice configurations positionally into complete
     * task configurations — the order matters and must not be shuffled here.
     * Input: reader/writer parameter lists of equal length; output: one
     * content element per task, tagged with its task id.
     */
    private List<Configuration> mergeReaderAndWriterTaskConfigs(
            List<Configuration> readerTasksConfigs,
            List<Configuration> writerTasksConfigs)
    {
        int readerCount = readerTasksConfigs.size();
        int writerCount = writerTasksConfigs.size();
        if (readerCount != writerCount)
        {
            throw DataExchangeException.asDataExchangeException(
                    FrameworkErrorCode.PLUGIN_SPLIT_ERROR,
                    String.format("reader切分的task数目[%d]不等于writer切分的task数目[%d].",
                            readerCount, writerCount)
            );
        }

        List<Configuration> mergedConfigs = new ArrayList<Configuration>(readerCount);
        for (int taskId = 0; taskId < readerCount; taskId++)
        {
            Configuration taskConfig = Configuration.newDefaultDataMap();

            taskConfig.set(Constants.job_reader_parameter,
                    readerTasksConfigs.get(taskId));
            taskConfig.set(Constants.job_writer_parameter,
                    writerTasksConfigs.get(taskId));
            taskConfig.set(Constants.task_id, taskId);

            mergedConfigs.add(taskConfig);
        }

        return mergedConfigs;
    }

    /**
     * Two-step assembly: 1. tasks to channels, 2. channels to taskGroups.
     * Net effect: pack the split tasks into task groups so the computed
     * channel count is satisfied without starting extra channels.
     * <p/>
     * Example:
     * <p/>
     * Preconditions: splitting produced 1024 shards, the requested total rate
     * is 1000M/s with 3M/s per channel, and each taskGroup runs 7 channels.
     * <p/>
     * Computation: total channels = 1000M/s / 3M/s = 333; to spread evenly,
     * 308 channels carry 3 tasks and 25 channels carry 4 tasks. Needed
     * taskGroups = 333 / 7 = 47 remainder 4, i.e. 48 taskGroups — 47 run 7
     * channels each and one runs the remaining 4 channels.
     * <p/>
     * Handling: the taskGroup carrying the remainder channels is filled
     * first (taskGroupId 0), then the remaining tasks are dealt round-robin,
     * like dealing cards, over the taskGroups holding the average channel
     * count.
     * <p/>
     * TODO delete it — superseded by TaskAssigns.assignFairly (see schedule()).
     *
     * @param averTaskPerChannel average task count per channel (must be > 0)
     * @param channelNumber total channel count (must be > 0)
     * @param channelsPerTaskGroup channels per task group (must be > 0)
     * @return the complete, independent configuration of every taskGroup
     */
    @SuppressWarnings("serial")
    @Deprecated
    private List<Configuration> distributeTasksToTaskGroup(int averTaskPerChannel, int channelNumber, int channelsPerTaskGroup)
    {
        Validate.isTrue(averTaskPerChannel > 0 && channelNumber > 0
                        && channelsPerTaskGroup > 0,
                "每个channel的平均task数[averTaskPerChannel]，channel数目[channelNumber]，每个taskGroup的平均channel数[channelsPerTaskGroup]都应该为正数");
        List<Configuration> taskConfigs = this.configuration
                .getListConfiguration(Constants.task_list);
        int taskGroupNumber = channelNumber / channelsPerTaskGroup;
        int leftChannelNumber = channelNumber % channelsPerTaskGroup;
        if (leftChannelNumber > 0)
        {
            // A remainder means one extra, smaller task group.
            taskGroupNumber += 1;
        }

        /**
         * With a single taskGroup, just tag the configuration and return it.
         */
        if (taskGroupNumber == 1)
        {
            final Configuration taskGroupConfig = this.configuration.clone();
            /**
             * clone() does not copy the task list deeply, so set it
             * explicitly here.
             */
            taskGroupConfig.set(Constants.task_list, this.configuration
                    .getListConfiguration(Constants.task_list));
            taskGroupConfig.set(Constants.container_taskgroup_channel,
                    channelNumber);
            taskGroupConfig.set(Constants.container_taskgroup_id, 0);
            return new ArrayList<Configuration>()
            {
                {
                    add(taskGroupConfig);
                }
            };
        }

        List<Configuration> taskGroupConfigs = new ArrayList<Configuration>();
        /**
         * Clear the content (task list) of every taskGroup configuration.
         */
        for (int i = 0; i < taskGroupNumber; i++)
        {
            Configuration taskGroupConfig = this.configuration.clone();
            List<Configuration> taskGroupJobContent = taskGroupConfig
                    .getListConfiguration(Constants.task_list);
            taskGroupJobContent.clear();
            taskGroupConfig.set(Constants.task_list, taskGroupJobContent);

            taskGroupConfigs.add(taskGroupConfig);
        }

        int taskConfigIndex = 0;
        int channelIndex = 0;
        int taskGroupConfigIndex = 0;

        /**
         * First fill the taskGroup whose channel count is below the average
         * (the remainder group), if any.
         */
        if (leftChannelNumber > 0)
        {
            Configuration taskGroupConfig = taskGroupConfigs.get(taskGroupConfigIndex);
            for (; channelIndex < leftChannelNumber; channelIndex++)
            {
                for (int i = 0; i < averTaskPerChannel; i++)
                {
                    List<Configuration> taskGroupJobContent = taskGroupConfig
                            .getListConfiguration(Constants.task_list);
                    taskGroupJobContent.add(taskConfigs.get(taskConfigIndex++));
                    taskGroupConfig.set(Constants.task_list,
                            taskGroupJobContent);
                }
            }

            taskGroupConfig.set(Constants.container_taskgroup_channel,
                    leftChannelNumber);
            taskGroupConfig.set(Constants.container_taskgroup_id,
                    taskGroupConfigIndex++);
        }

        /**
         * Deal the remaining tasks round-robin over the equal-sized
         * taskGroups, then tag each with its channel count and taskGroupId.
         */
        int equalDivisionStartIndex = taskGroupConfigIndex;
        for (; taskConfigIndex < taskConfigs.size()
                && equalDivisionStartIndex < taskGroupConfigs.size(); )
        {
            for (taskGroupConfigIndex = equalDivisionStartIndex; taskGroupConfigIndex < taskGroupConfigs
                    .size() && taskConfigIndex < taskConfigs.size(); taskGroupConfigIndex++)
            {
                Configuration taskGroupConfig = taskGroupConfigs.get(taskGroupConfigIndex);
                List<Configuration> taskGroupJobContent = taskGroupConfig
                        .getListConfiguration(Constants.task_list);
                taskGroupJobContent.add(taskConfigs.get(taskConfigIndex++));
                taskGroupConfig.set(
                        Constants.task_list, taskGroupJobContent);
            }
        }

        for (taskGroupConfigIndex = equalDivisionStartIndex;
             taskGroupConfigIndex < taskGroupConfigs.size(); )
        {
            Configuration taskGroupConfig = taskGroupConfigs.get(taskGroupConfigIndex);
            taskGroupConfig.set(Constants.container_taskgroup_channel,
                    channelsPerTaskGroup);
            taskGroupConfig.set(Constants.container_taskgroup_id,
                    taskGroupConfigIndex++);
        }

        return taskGroupConfigs;
    }

    /**
     * Runs the reader plugin's post phase.
     */
    private void postJobReader() throws SQLException
    {


        this.abstractJobReader.post();

    }

    /**
     * Runs the writer plugin's post phase.
     */
    private void postJobWriter() throws SQLException
    {
        this.abstractJobWriter.post();
    }

    /**
     * Checks the final error counts against the configured threshold: a
     * threshold below 1 is interpreted as a percentage limit, 1 or more as an
     * absolute record-count limit.
     */
    private void checkLimit()
    {
        Message message = super.getContainerCommunicator().collect();
        errorLimit.checkRecordLimit(message);
        errorLimit.checkPercentageLimit(message);
    }


}
