/*
 *  ------------------------------------------------------------------
 *  Copyright © 2017 Hangzhou DtDream Technology Co., Ltd. All rights reserved.
 *  ------------------------------------------------------------------
 *              Product: EMR
 *    Module Name: DataTrain
 *      Date Created: 2017-10-23
 *         Description:
 *  ------------------------------------------------------------------
 *  Modification History
 *  DATE            Name           Description
 *  ------------------------------------------------------------------
 *  2017-10-23     XZ 209
 *  ------------------------------------------------------------------
 */

package org.xukai.remoting.sdk.rpc.job;

import com.dtdream.emr.transmission.exception.TransmissionException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.UUID;
import org.xukai.remoting.sdk.rpc.transfer.TransferNode;
import org.xukai.remoting.sdk.rpc.writer.BufferedWriter;
import org.xukai.remoting.sdk.rpc.writer.Writer;

import static com.dtdream.emr.transmission.exception.TransmissionException.Error.EXCEPTION_SDK_JOB_COMMITTED;
import static com.dtdream.emr.transmission.exception.TransmissionException.Error.EXCEPTION_SDK_JOB_IN_PROCESS;
import static com.dtdream.emr.transmission.exception.TransmissionException.Error.EXCEPTION_SDK_JOB_NOT_COMMITTED;

public class UploadJob extends Job {

    private static final Logger LOG = LoggerFactory.getLogger(UploadJob.class);

    // Writers created for this job, keyed by the caller-assigned writer id.
    // Never reassigned, so declared final; access is guarded by synchronized methods.
    private final Map<Integer, Writer> writers = new HashMap<>();

    // Flips to true exactly once, after a successful commitUploadJob call.
    private boolean committed = false;

    /**
     * Creates an upload job.
     *
     * @param properties job configuration, see {@link Job}
     * @throws TransmissionException if the job cannot be initialized
     */
    public UploadJob(Properties properties) throws TransmissionException {
        super(properties);
    }

    /**
     * Registers this upload job with the remote transfer service.
     *
     * @return the job id assigned by the remote service
     * @throws TransmissionException if remote job creation fails
     */
    protected final UUID createJobInRemote() throws TransmissionException {
        return transferClient.createUploadJob(getProjectName(), getTableName(), getPartition());
    }

    /**
     * Creates a writer for this job.
     *
     * <p>The id is chosen by the caller. Creating another writer with the same id
     * replaces the previous mapping and may overwrite its data.
     *
     * @param id caller-assigned id of the writer
     * @return a buffered writer bound to the transfer node selected by the remote service
     * @throws TransmissionException if the job has already been committed
     */
    public synchronized BufferedWriter newWriter(int id) throws TransmissionException {
        if (committed) {
            throw new TransmissionException(EXCEPTION_SDK_JOB_COMMITTED,
                    String.format("Job[table=`%s`, partition=`%s`] has been committed",
                            getTableName(), getPartition()));
        }

        TransferNode node = transferClient.createWriter(getJobId(), getTableName(), getPartition(), id);
        BufferedWriter writer = new BufferedWriter(this, id, node);
        this.writers.put(id, writer);
        return writer;
    }

    /**
     * Returns the writer previously created under the given id, or {@code null}
     * if no such writer exists.
     *
     * @param writerId caller-assigned id used in {@link #newWriter(int)}
     * @return the writer, or {@code null} if absent
     */
    public synchronized Writer getWriter(int writerId) {
        return writers.get(writerId);
    }

    /**
     * Commits this upload job on the remote service.
     *
     * <p>All writers must be closed first; otherwise an
     * {@code EXCEPTION_SDK_JOB_IN_PROCESS} error is raised. Committing an
     * already-committed job is a no-op (logged as a warning).
     *
     * @throws TransmissionException if any writer is still open or the remote commit fails
     */
    public synchronized void commit() throws TransmissionException {
        if (!committed) {
            for (Writer writer : writers.values()) {
                if (!writer.isClosed()) {
                    // Fixed label: this describes a Writer, so report writerId (was "readerId",
                    // an apparent copy-paste from the download-side counterpart).
                    throw new TransmissionException(EXCEPTION_SDK_JOB_IN_PROCESS,
                            String.format("Writer[table=`%s`, partition=`%s`, writerId=%s] has not been closed.",
                                    getTableName(), getPartition(), writer.getId()));
                }
            }

            transferClient.commitUploadJob(getJobId());
            LOG.info("Job Committed: table=`{}`, partition=`{}`", getTableName(), getPartition());
            committed = true;
        } else {
            LOG.warn("Commit a committed Job has no effects");
        }
    }
}
