/*
 * Copyright (c) 2006 Terracotta, Inc. All rights reserved.
 */
package com.google.zahra.workmanager;

import com.google.zahra.testmanager.Test;
import com.google.zahra.testmanager.TestManager;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

// Terracotta cluster events packages
import com.tc.cluster.DsoCluster;
import com.tc.cluster.DsoClusterEvent;
import com.tc.cluster.DsoClusterListener;
import com.tc.injection.annotations.InjectedDsoInstance;
import com.tcclient.cluster.DsoNode;

import org.terracotta.workmanager.pipemanager.Pipe;
import org.terracotta.workmanager.pipemanager.PipeManager;
import org.terracotta.workmanager.queue.Queue;
import org.terracotta.workmanager.routing.RoutableWorkItem;
import org.terracotta.workmanager.routing.Router;

// CommonJ is a BEA and IBM joint specification that provides a standard for
// executing concurrent tasks in a JEE environment.
import commonj.work.Work;
import commonj.work.WorkEvent;
import commonj.work.WorkItem;
import commonj.work.WorkListener;
import commonj.work.WorkManager;

/**
 * A routing-aware {@code WorkManager} that uses an implementation of the
 * {@code Router} interface to route work to different work queues. Also
 * supports dynamic joining and leaving of {@code DynamicWorker}s.
 */
public class ZahraWorkManager implements WorkManager, DsoClusterListener {

  // Regardless of how many injections are added,
  // only one DsoCluster instance is instantiated per node.
  @InjectedDsoInstance
  private DsoCluster cluster;

  private final Router<String> router;
  private final TestManager testManager;
  private final RoutableWorkItem.Factory<String> workItemFactory =
      new RoutableWorkItem.Factory<String>();
  private final PipeManager<String> pipeManager;

  // Clustered set of finished work items shared with the router; it doubles as
  // the monitor that waitForAny(long) blocks on, so producers are expected to
  // notify on it when they add completed items.
  private final Set<RoutableWorkItem<String>> completedWork;

  // ID of this (master) node; null until operationsEnabled() fires.
  // Reads and writes are guarded by "this" (see getMyNodeId()/thisNodeId()).
  private Object nodeId;

  /**
   * Constructor. Sets the router and gets the completed work queue from the
   * router. Also adds a listener to the Terracotta cluster.
   *
   * <p>NOTE(review): registering {@code this} as a cluster listener inside the
   * constructor publishes a not-fully-constructed instance; kept as-is because
   * callers rely on events being delivered immediately after construction.
   *
   * @param router the router used to distribute work across worker queues
   * @param pipeManager gives access to each worker's pending/completed pipes
   * @param testManager receives work items that need to be rerouted
   */
  public ZahraWorkManager(final Router<String> router, final PipeManager<String> pipeManager,
      final TestManager testManager) {
    this.router = router;
    this.testManager = testManager;
    this.completedWork = router.getAllCompletedWork();
    this.pipeManager = pipeManager;
    cluster.addClusterListener(this);
  }

  /*
   * (non-Javadoc)
   *
   * @see commonj.work.WorkManager#schedule(commonj.work.Work,
   * commonj.work.WorkListener)
   */
  @Override
  public WorkItem schedule(Work work, WorkListener wl) throws IllegalArgumentException {
    if (work == null) {
      throw new IllegalArgumentException("The Work is null");
    }

    // Only Test works carry a job ID; guard the cast so that arbitrary Work
    // implementations no longer fail with a ClassCastException here.
    if (work instanceof Test) {
      System.out.println("scheduled job ID: " + ((Test) work).jobID);
    }
    RoutableWorkItem<String> workItem = workItemFactory.create(work, wl, null);

    return router.route(workItem);
  }

  /**
   * Routes an already-wrapped work item.
   *
   * @param workItem the item to route; must not be null
   * @param wl listener associated with the item (unused here; the item already
   *        carries its listener)
   * @return the routed work item
   * @throws IllegalArgumentException if {@code workItem} is null
   */
  public WorkItem schedule(RoutableWorkItem<String> workItem, WorkListener wl)
      throws IllegalArgumentException {
    if (workItem == null) {
      throw new IllegalArgumentException("The WorkItem is null");
    }

    // Guard the cast: getResult() may be null or a non-Test Work.
    if (workItem.getResult() instanceof Test) {
      System.out.println("scheduled job ID: " + ((Test) workItem.getResult()).jobID);
    }

    return router.route(workItem);
  }

  /*
   * (non-Javadoc)
   *
   * @see commonj.work.WorkManager#schedule(commonj.work.Work)
   */
  @Override
  public WorkItem schedule(final Work work) throws IllegalArgumentException {
    return schedule(work, null);
  }

  /**
   * An interfacing method to get the {@code completedWork} queue from the
   * router.
   *
   * <p>NOTE(review): this exposes the live internal set; mutations by callers
   * affect this manager. Kept for backward compatibility.
   *
   * @return The {@code completedWork} queue from the router
   */
  public Set<RoutableWorkItem<String>> getCompletedWork() {
    return completedWork;
  }

  /*
   * (non-Javadoc)
   *
   * @see commonj.work.WorkManager#waitForAll
   */
  @Override
  public boolean waitForAll(Collection<WorkItem> workItems, long timeout)
      throws InterruptedException {
    // TODO: still cheating -- this counts ANY completed items, not necessarily
    // the ones in the supplied collection (mirrors waitForAny(Collection, long)).
    if (workItems == null || workItems.isEmpty()) {
      // Nothing to wait for. Previously an empty collection made the loop
      // below spin forever because the completed counter could never reach 0
      // after being incremented.
      return true;
    }
    final int nrOfPendingWorkItems = workItems.size();
    int nrOfCompletedWorkItems = 0;
    while (nrOfCompletedWorkItems < nrOfPendingWorkItems) {
      WorkItem workItem = waitForAny(timeout);
      if (workItem == null) {
        return false; // timed out before everything completed
      }
      nrOfCompletedWorkItems++;
    }
    return true;
  }

  /*
   * (non-Javadoc)
   *
   * @see commonj.work.WorkManager#waitForAny
   */
  @Override
  public Collection<WorkItem> waitForAny(final Collection<WorkItem> workItems, final long timeout)
      throws InterruptedException {
    // TODO: cheating now by only wrapping a single completed item in a
    // collection
    WorkItem workItem = waitForAny(timeout);
    if (workItem == null) {
      return Collections.emptyList();
    }
    final List<WorkItem> completedWorkItems = new ArrayList<WorkItem>();
    completedWorkItems.add(workItem);
    return completedWorkItems;
  }

  /**
   * Wait for any of the WorkItems to finish. If there is no completed item yet
   * and timeout is not {@code IMMEDIATE}, blocks until one appears or the
   * timeout elapses.
   *
   * @param timeout the timeout in ms. {@code IMMEDIATE} (0) returns at once,
   *        {@code INDEFINITE} blocks until an item completes; any other value
   *        is honored as a real deadline (previously finite timeouts
   *        busy-spun forever instead of timing out).
   * @return a single WorkItem that has completed, or null on timeout
   * @throws InterruptedException thrown if the wait is interrupted.
   */
  public WorkItem waitForAny(long timeout) throws InterruptedException {
    // Deadline is only meaningful for finite timeouts; for INDEFINITE the
    // (possibly overflowed) value is never read.
    final long deadline = System.currentTimeMillis() + timeout;
    synchronized (completedWork) {
      while (true) {
        // Explicit iterator so the matched item can be removed safely while
        // iterating the set.
        for (Iterator<RoutableWorkItem<String>> it = completedWork.iterator(); it.hasNext();) {
          RoutableWorkItem<String> workItem = it.next();
          if (workItem.getStatus() == WorkEvent.WORK_COMPLETED
              || workItem.getStatus() == WorkEvent.WORK_REJECTED) {
            it.remove();
            return workItem;
          }
        }
        if (timeout == IMMEDIATE) {
          return null;
        }
        if (timeout == INDEFINITE) {
          // Woken by producers notifying on completedWork; loop re-scans to
          // guard against spurious wakeups.
          completedWork.wait();
        } else {
          long remaining = deadline - System.currentTimeMillis();
          if (remaining <= 0) {
            return null; // deadline passed
          }
          completedWork.wait(remaining);
        }
      }
    }
  }

  /**
   * Checks if a node ID is equal to the master's (this node) ID.
   *
   * <p>Synchronized for visibility of {@code nodeId}, which is written under
   * the same monitor in {@link #thisNodeId(Object)}.
   *
   * @param nodeId The ID to check against
   * @return true if the ID belongs to the master node, false otherwise
   */
  private synchronized boolean isMasterNode(Object nodeId) {
    return nodeId.equals(this.nodeId);
  }

  /**
   * Returns the node ID of the current node. Blocks if the node ID has not
   * been set yet.
   *
   * @return Object representing the ID of the current node
   * @throws InterruptedException if interrupted while waiting for the ID
   */
  public synchronized Object getMyNodeId() throws InterruptedException {
    while (nodeId == null) {
      wait();
    }
    return nodeId;
  }

  /**
   * Sets the node ID of the current node.
   *
   * @param nodeId Node ID to set this node to
   */
  public synchronized void thisNodeId(final Object nodeId) {
    System.out.println("work manager node id: " + nodeId);
    this.nodeId = nodeId;
    // notifyAll(), not notify(): several threads may block in getMyNodeId().
    notifyAll();
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * com.tc.cluster.DsoClusterListener#nodeJoined(com.tc.cluster.DsoClusterEvent
   * )
   */
  @Override
  public void nodeJoined(DsoClusterEvent arg0) {
    // The master doesn't have to take care of worker nodes joining
    System.out.println("A new node has joined " + arg0.getNode().getId());
  }

  /*
   * (non-Javadoc)
   *
   * @see
   * com.tc.cluster.DsoClusterListener#nodeLeft(com.tc.cluster.DsoClusterEvent)
   */
  @Override
  public synchronized void nodeLeft(DsoClusterEvent arg0) {
    // Obtain the node ID of the disconnected node
    String nodeId = arg0.getNode().getId();

    System.out.println("Unregistering worker node with ID: " + nodeId);

    // A null pipe means there is no pipe to work with (i.e. the node that
    // disconnected is not a worker).
    if (pipeManager.get(nodeId) == null) {
      return;
    }

    List<RoutableWorkItem<String>> pending = drainAndUnregister(nodeId);

    // reroute the pending work
    System.out.println("Rerouting the pending work");
    for (RoutableWorkItem<String> workItem : pending) {
      testManager.addWorkItem(workItem);
    }
  }

  /**
   * Fail-over bookkeeping for a worker that left: moves all of its completed
   * items into the shared {@code completedWork} set, snapshots its pending
   * items (required because unregistering clears and removes the pipe), then
   * unregisters the worker from the router.
   *
   * @param nodeId ID of the departed worker; its pipe must exist
   * @return the pending work items that still need rerouting
   */
  private List<RoutableWorkItem<String>> drainAndUnregister(String nodeId) {
    Pipe<RoutableWorkItem<String>, String> pipe = pipeManager.get(nodeId);

    // add all completed items to the completed work set
    System.out.println("Adding all completed items to the completed work set with ID: " + nodeId);
    for (Iterator<RoutableWorkItem<String>> it = pipe.getCompletedWorkQueue().iterator();
        it.hasNext();) {
      completedWork.add(it.next());
    }

    // copy all pending work (needed since we will have to remove the pipe
    // before the rerouting)
    System.out.println("Copying all pending work with ID: " + nodeId);
    List<RoutableWorkItem<String>> pending = new ArrayList<RoutableWorkItem<String>>();
    for (Iterator<RoutableWorkItem<String>> it = pipe.getPendingWorkQueue().iterator();
        it.hasNext();) {
      pending.add(it.next());
    }

    // unregister the worker, which also clears and removes the pipe; must
    // happen before fail-over rerouting
    System.out.println("Unregistering the worker with ID: " + nodeId);
    router.unregister(nodeId);

    return pending;
  }

  /*
   * (non-Javadoc)
   *
   * @seecom.tc.cluster.DsoClusterListener#operationsDisabled(com.tc.cluster.
   * DsoClusterEvent)
   */
  @Override
  public void operationsDisabled(DsoClusterEvent arg0) {
    System.out.println("OPERATIONS DISABLED");
  }

  /*
   * (non-Javadoc)
   *
   * @seecom.tc.cluster.DsoClusterListener#operationsEnabled(com.tc.cluster.
   * DsoClusterEvent)
   */
  @Override
  public synchronized void operationsEnabled(DsoClusterEvent arg0) {
    System.out.println("OPERATIONS ENABLED");

    // If this is the first time the node has connected to the cluster, its
    // nodeId will be null; on reconnects there is nothing to initialize.
    if (nodeId != null) {
      return;
    }
    thisNodeId(arg0.getNode().getId());

    try {
      System.out.println("MASTER operations enabled ID: " + getMyNodeId());
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // preserve the interrupt status
      e.printStackTrace();
    }

    // Check for workers that left while the master was down, unregister them,
    // and collect their unfinished work for rerouting
    List<RoutableWorkItem<String>> pending = new ArrayList<RoutableWorkItem<String>>();
    for (String goneNodeId : getLeftWorkers()) {
      pending.addAll(drainAndUnregister(goneNodeId));
    }

    // There might have been workers that connected to the cluster before the
    // master started. In that case, register each of them into the router.
    for (String id : pipeManager.getWorkers()) {
      System.out.println("registering worker node with ID: " + id);
      router.register(id);
    }

    // reroute the pending work
    System.out.println("Rerouting the pending work");
    for (RoutableWorkItem<String> workItem : pending) {
      testManager.addWorkItem(workItem);
    }
  }

  /**
   * Returns all cluster nodes except this (master) node.
   *
   * @return the worker nodes currently in the cluster topology
   */
  public Collection<DsoNode> getWorkerNodes() {
    Collection<DsoNode> nodes = new LinkedList<DsoNode>();
    for (DsoNode node : cluster.getClusterTopology().getNodes()) {
      if (!isMasterNode(node.getId())) {
        nodes.add(node);
      }
    }
    return nodes;
  }

  /**
   * Checks whether a worker node with the given ID is currently connected.
   *
   * @param id the node ID to look for
   * @return true if a worker with this ID is in the cluster topology
   */
  public boolean checkIfNodeExists(String id) {
    for (DsoNode node : getWorkerNodes()) {
      if (node.getId().equals(id)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Finds workers that have a registered pipe but are no longer present in the
   * cluster topology (i.e. they left while the master was down).
   *
   * @return the IDs of departed workers
   */
  public Collection<String> getLeftWorkers() {
    Collection<String> goneNodes = new LinkedList<String>();
    for (String id : pipeManager.getPipes().keySet()) {
      if (!checkIfNodeExists(id)) {
        goneNodes.add(id);
        System.out.println("Worker " + id + " has left.");
      }
    }
    return goneNodes;
  }

  /**
   * Stops every worker, clears its pending work queue, and starts it again.
   *
   * @throws InterruptedException if sending a command is interrupted
   */
  public void clear() throws InterruptedException {
    // Get all the nodes already connected to the cluster
    for (String id : pipeManager.getWorkers()) {
      System.out.println("Stopping Worker: " + id);
      pipeManager.sendCommand(id, "STOP");

      Pipe<RoutableWorkItem<String>, String> pipe = pipeManager.get(id);
      synchronized (pipe) {
        // Reuse the pipe already fetched above instead of a second lookup.
        pipe.getPendingWorkQueue().clear();
      }

      System.out.println("Starting Worker: " + id);
      pipeManager.sendCommand(id, "START");
    }
  }

  /**
   * Sends a SHUTDOWN command to every connected worker.
   *
   * @throws InterruptedException if sending a command is interrupted
   */
  public void resetWorkers() throws InterruptedException {
    // Get all the nodes already connected to the cluster
    for (String id : pipeManager.getWorkers()) {
      System.out.println("Resetting Worker: " + id);
      pipeManager.sendCommand(id, "SHUTDOWN");
    }
  }

}
