package HandshakeJoin;

import java.util.ArrayList;
import java.net.URL;
import com.google.common.collect.Lists;
import com.google.common.base.Preconditions;
import org.apache.commons.lang.ArrayUtils;
import org.apache.s4.base.Event;
import org.apache.s4.core.App;
import org.apache.s4.core.Stream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * S4 application wiring up a handshake-join topology: a chain of
 * {@code JoinWorker} PEs fed from both ends by two {@code JoinDriver} PEs
 * (tuples of stream R enter from the left, tuples of stream S from the
 * right). Join results are funneled through a {@code ResultCollector} into a
 * {@code ResultPersistor}, and an optional {@code QueryProcessor} serves
 * external query requests against the chosen database backend.
 *
 * <p>All tunables are loaded from {@code /configuration.txt} on the classpath
 * during {@link #onInit()}; the field initializers below are only fallback
 * defaults that are overwritten by {@link #setGlobalParameters(String)}.
 */
public class HandshakeJoinApp extends App
{
    /*
     * NOTE: 'transient' was dropped from the logger declaration — it is
     * meaningless on a static field (transient only affects instance
     * serialization) and the modifier order was non-standard.
     */
    private static final Logger logger = LoggerFactory
            .getLogger(HandshakeJoinApp.class);

    /** Backends for which a client exists; anything else disables the DB (see initDB). */
    private static final String[] supportedDatabases = { "cassandra", "hbase",
            "logbase" };

    /* Defaults — all overwritten from /configuration.txt in onInit(). */
    private int numJoinWorkers = 4;
    private int capacityOfWindowR = 256;
    private int capacityOfWindowS = 256;
    private int expiryInterval = 100;
    private boolean randomSource = false;
    private int ingestInterval = 90;
    private long tupleRLifespan = 1000;
    private long tupleSLifespan = 1000;
    private int snapshotThrh = 256;
    private int snapshotInterval = 2000;
    private int delayCount = 5;
    private boolean blockedTrans = false;
    private int blkSizeR = 16;
    private int blkSizeS = 16;
    private int numFieldsR = 4;
    private int numFieldsS = 4;
    private boolean multimapJoin = false;
    private boolean bstJoin = false;
    private String dbChoice = null;
    private ArrayList<JoinConfig> jConfigs = null;

    /**
     * Builds the full PE graph: join workers, drivers, result collector,
     * persistor, optional random-data generators, query processor, and the
     * remote input streams for the external adapter.
     */
    @Override
    protected void onInit() {
        /* Configurations */
        setGlobalParameters("/configuration.txt");

        /*
         * The topology wiring below indexes streamR.get(1) and
         * streamS.get(numJoinWorkers - 2); with fewer than two workers that
         * would fail with an obscure IndexOutOfBoundsException, so fail fast
         * with a clear message instead.
         */
        Preconditions.checkState(numJoinWorkers >= 2,
                "handshake join requires at least 2 join workers, got %s",
                numJoinWorkers);

        /* Join worker PEs and their upstreams. */
        ArrayList<JoinWorker> jWorkers = Lists
                .newArrayListWithCapacity(numJoinWorkers);
        ArrayList<Stream<Message>> streamR = Lists
                .newArrayListWithCapacity(numJoinWorkers);
        ArrayList<Stream<Message>> streamS = Lists
                .newArrayListWithCapacity(numJoinWorkers);

        for (int i = 0; i < numJoinWorkers; ++i) {
            JoinWorker jw = createPE(JoinWorker.class);
            jw.setWorkerName("Worker_" + i)
                    .setCapacityOfWindowR(capacityOfWindowR)
                    .setCapacityOfWindowS(capacityOfWindowS)
                    .setEnableBlockedTransfer(blockedTrans)
                    .setTupleBlockSizeR(blkSizeR).setTupleBlockSizeS(blkSizeS)
                    .setEnableMultimapJoin(multimapJoin)
                    .setEnableBSTJoin(bstJoin)
                    .setDisplayStatusInterval(1000);

            // Per-worker input streams: R flows left-to-right, S right-to-left.
            Stream<Message> sR = createStream("R_" + i, new MessageKeyFinder(),
                    jw);
            Stream<Message> sS = createStream("S_" + i, new MessageKeyFinder(),
                    jw);

            jWorkers.add(jw);
            streamR.add(sR);
            streamS.add(sS);
        }
        // End workers need to know they have no further neighbor to forward to.
        jWorkers.get(0).setLeftMost(true);
        jWorkers.get(numJoinWorkers - 1).setRightMost(true);

        /* Join driver PEs and their upstreams. */
        JoinDriver jDriverLeft = createPE(JoinDriver.class);
        jDriverLeft.setDriverName("Driver_R")
                .setDirection(Message.Direction.FROM_LEFT)
                .setJoinConfigs(jConfigs).setTupleLifespan(tupleRLifespan)
                .setExpiryInterval(expiryInterval);

        JoinDriver jDriverRight = createPE(JoinDriver.class);
        jDriverRight.setDriverName("Driver_S")
                .setDirection(Message.Direction.FROM_RIGHT)
                .setJoinConfigs(jConfigs).setTupleLifespan(tupleSLifespan)
                .setExpiryInterval(expiryInterval);

        /* Result collector PE and its upstream. */
        ResultCollector resultCollector = createPE(ResultCollector.class);
        resultCollector.setThreshold(snapshotThrh)
                .setSnapShotInterval(snapshotInterval)
                .setSnapshotDelayCount(delayCount);

        Stream<JoinResult> streamResult = createStream("JoinResult",
                new JoinResultKeyFinder(), resultCollector);

        /* Result persistor PE and its upstream. */
        ResultPersistor resultPersistor = createPE(ResultPersistor.class);
        // A joined tuple has the fields of R plus S minus the shared join key.
        resultPersistor.setNumberOfFields(numFieldsR + numFieldsS - 1)
                .setDBChoice(dbChoice).setConsoleDisplay(false);

        Stream<Snapshot> streamSnapshot = createStream("Snapshot",
                new SnapshotKeyFinder(), resultPersistor);

        /*
         * Construct handshake join topology: worker i forwards R tuples to
         * worker i+1 and S tuples to worker i-1; the drivers feed the two
         * ends of the chain.
         */
        jDriverLeft.setDownStream(streamR.get(0));
        jWorkers.get(0).setDownStreamRight(streamR.get(1));

        for (int i = 1; i < numJoinWorkers - 1; ++i) {
            jWorkers.get(i).setDownStreamRight(streamR.get(i + 1))
                    .setDownStreamLeft(streamS.get(i - 1));
        }

        jWorkers.get(numJoinWorkers - 1).setDownStreamLeft(
                streamS.get(numJoinWorkers - 2));
        jDriverRight.setDownStream(streamS.get(numJoinWorkers - 1));

        /* Construct result handling topology: all workers share one result stream. */
        for (int i = 0; i < numJoinWorkers; ++i) {
            jWorkers.get(i).setDownStreamResult(streamResult);
        }

        resultCollector.setDownStream(streamSnapshot);

        /* PEs that generate inputs (for test). */
        if (randomSource) {
            Stream<Event> dataStreamR = createStream("DataR",
                    new DataKeyFinder(), jDriverLeft);

            // Fixed seeds keep generated workloads reproducible across runs.
            GenerateDataPE generateR = createPE(GenerateDataPE.class);
            generateR.setDownStream(dataStreamR).setNumberOfFields(numFieldsR)
                    .setRandomSeed(8871).setIngestInterval(ingestInterval)
                    .setSingleton(true);

            Stream<Event> dataStreamS = createStream("DataS",
                    new DataKeyFinder(), jDriverRight);

            GenerateDataPE generateS = createPE(GenerateDataPE.class);
            generateS.setDownStream(dataStreamS).setNumberOfFields(numFieldsS)
                    .setRandomSeed(2605).setIngestInterval(ingestInterval)
                    .setSingleton(true);
        }

        /* Global initialization for the database. */
        initDB();

        /* Query processor PE and its remote stream. */
        QueryProcessor queryProcessor = createPE(QueryProcessor.class);
        queryProcessor.setNumberOfFields(numFieldsR + numFieldsS - 1)
                .setDBChoice(dbChoice);

        createInputStream("QueryRequest", new QueryRequestKeyFinder(),
                queryProcessor);

        /* Remote streams for input adapter. */
        createInputStream("ExtDataR", new DataKeyFinder(), jDriverLeft);
        createInputStream("ExtDataS", new DataKeyFinder(), jDriverRight);
    }

    @Override
    protected void onStart() {
        logGlobalParameters();
    }

    @Override
    protected void onClose() {}

    /**
     * Runs the one-time global initialization of whichever database client
     * was selected in the configuration; a no-op when no supported backend
     * was chosen (dbChoice == null).
     */
    private void initDB() {
        if (dbChoice != null) {
            if (dbChoice.equals("cassandra")) {
                CassandraClient.initGlobal();
            }
            else if (dbChoice.equals("hbase")) {
                HBaseClient.initGlobal();
            }
            else if (dbChoice.equals("logbase")) {
                LogBaseClient.initGlobal();
            }
        }
    }

    /**
     * Loads all tunables from the join description file on the classpath,
     * overwriting the field defaults, and validates the resulting state.
     *
     * @param filename classpath resource path of the configuration file
     * @throws IllegalStateException if the resource is missing or both
     *         optimized join strategies are enabled at once
     */
    private void setGlobalParameters(final String filename) {
        URL url = this.getClass().getResource(filename);
        Preconditions.checkState(url != null, "invalid join description file");

        Configure config = (new Configure()).parseJoinDespFile(url);

        jConfigs = config.getJoinConfigs();
        numJoinWorkers = config.getNumJoinWorkers();
        capacityOfWindowR = config.getCapacityOfWindowR();
        capacityOfWindowS = config.getCapacityOfWindowS();
        expiryInterval = config.getExpiryInterval();
        randomSource = config.getEnableRandomSource();
        ingestInterval = config.getIngestInterval();
        tupleRLifespan = config.getTupleRLifespan();
        tupleSLifespan = config.getTupleSLifespan();
        snapshotThrh = config.getSnapshotThreshold();
        snapshotInterval = config.getSnapshotInterval();
        delayCount = config.getSnapshotDelayCount();
        blockedTrans = config.getEnableBlockedTransfer();
        blkSizeR = config.getTupleBlockSizeR();
        blkSizeS = config.getTupleBlockSizeS();
        numFieldsR = config.getNumberOfFieldsInTupleR();
        numFieldsS = config.getNumberOfFieldsInTupleS();
        multimapJoin = config.getEnableMultimapJoin();
        bstJoin = config.getEnableBSTJoin();
        dbChoice = config.getDBChoice();

        // && instead of the non-short-circuit bitwise & used previously.
        Preconditions.checkState(!(multimapJoin && bstJoin),
                "at most 1 optimized join processing strategy is allowed");

        // Unrecognized backend: disable persistence rather than fail later.
        if (!ArrayUtils.contains(supportedDatabases, dbChoice)) {
            dbChoice = null;
            logger.info("None of the supported databases is chosen");
        }
    }

    /** Dumps the effective configuration at startup for diagnostics. */
    private void logGlobalParameters() {
        // Parameterized SLF4J logging instead of string concatenation.
        logger.info("Number of joins: {}", jConfigs.size());
        logger.info("Number of join workers: {}", numJoinWorkers);
        logger.info("Capacity of per sub-window R: {}", capacityOfWindowR);
        logger.info("Capacity of per sub-window S: {}", capacityOfWindowS);
        logger.info("Expiry check interval: {}", expiryInterval);
        logger.info("Enable random source (internal): {}", randomSource);
        logger.info("Ingest interval: {}", ingestInterval);
        logger.info("Tuple R lifespan: {}", tupleRLifespan);
        logger.info("Tuple S lifespan: {}", tupleSLifespan);
        logger.info("Snapshot threshold: {}", snapshotThrh);
        logger.info("Snapshot interval: {}", snapshotInterval);
        logger.info("Delay committing count: {}", delayCount);
        logger.info("Enable blocked transfer: {}", blockedTrans);
        logger.info("Batching size of tuple block of R: {}", blkSizeR);
        // Fixed copy-paste bug: this line previously said "of R" for blkSizeS.
        logger.info("Batching size of tuple block of S: {}", blkSizeS);
        logger.info("Number of fields in R: {}", numFieldsR);
        logger.info("Number of fields in S: {}", numFieldsS);
        logger.info("Enable Multimap-based join processing: {}", multimapJoin);
        logger.info("Enable BST-based join processing: {}", bstJoin);
        logger.info("Database: {}", dbChoice);
    }
}