/*
 * Copyright 2011-2012 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sabayframework.cluster.impl;

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import org.jgroups.Address;
import org.jgroups.logging.Log;
import org.jgroups.logging.LogFactory;
import org.jgroups.util.Util;
import org.sabayframework.cluster.Cluster;
import org.sabayframework.cluster.ClusterEvent;
import org.sabayframework.cluster.ClusterId;
import org.sabayframework.cluster.ClusterListener;
import org.sabayframework.cluster.NodeExecutionQueue;
import org.sabayframework.cluster.NodeId;
import org.sabayframework.cluster.NodeMapping;
import org.sabayframework.cluster.impl.kryo.KryoServer;
import org.w3c.dom.Element;


/**
 * Class maintains shared cluster state
 * @author Alex Antonau
 * */
/**
 * Maintains the shared cluster state: the set of known computers with their
 * JGroups addresses, the consistent-hash mapping over computer ids, the
 * registered {@link ClusterListener}s and the local node's execution queue.
 * <p>
 * Thread-safety: {@link #computers} and {@link #listeners} are guarded by
 * {@link #lock}; {@link #executionQueue} is volatile so the unlocked read in
 * {@link #getExecutionQueue()} sees the value published under the write lock.
 * NOTE(review): {@link #mapping} is mutated in {@link #handleComputerChanges}
 * outside the lock — confirm ConsistentHashing is itself thread-safe.
 *
 * @author Alex Antonau
 * */
public final class ClusterState extends BaseNode<ClusterId,ClusterId> implements Cluster
{

	private static final Log log = LogFactory.getLog(ClusterState.class);
	// RW lock to control concurrent access to the state
	private final ReentrantReadWriteLock lock;
	// connection to the cluster (JGroups channel wrapper)
	private final ClusterConnection clusterConn;
	// map of known computers to their jgroups address
	private final HashMap<RemoteComputer, Address> computers;
	// cluster listeners, de-duplicated in addEventListener
	private final ArrayList<ClusterListener> listeners;
	// consistent hashing over computer ids
	private final ConsistentHashing mapping;
	
	// volatile: written under the write lock in startExecutionQueue() but
	// read without any lock in getExecutionQueue()
	private volatile NodeExecutionQueue executionQueue;
	
	
	/**
	 * Creates the state holder and immediately connects to the cluster.
	 *
	 * @param clusterName logical name of the cluster to join
	 * @param props       XML configuration passed to the cluster connection
	 * @throws Exception if the cluster connection cannot be started
	 */
	public ClusterState(String clusterName, Element props) throws Exception {
		super(new ClusterIdImpl(clusterName));
		lock = new ReentrantReadWriteLock();
		computers = new HashMap<RemoteComputer, Address>();
		listeners = new ArrayList<ClusterListener>();
		mapping = new ConsistentHashing(1);
		clusterConn = new ClusterConnection(clusterName, props, this);
		clusterConn.start();
	}

	@Override
	public ClusterId getClusterId() {
		return id;
	}

	/**
	 * Registers the local computer, starts its Kryo-based execution queue and
	 * broadcasts an {@link ClusterEvent#ADD_COMPUTER} event so the remaining
	 * members learn about the new node.
	 *
	 * @param lc            the local computer to expose to the cluster
	 * @param maxBufferSize buffer size passed to the {@link KryoServer}
	 * @param maxQueueSize  queue size passed to the {@link KryoServer}
	 * @throws IllegalArgumentException if a computer with the same id has
	 *         already joined the cluster
	 * @throws Exception if the server cannot be started or the event not sent
	 */
	public void startExecutionQueue(LocalComputer lc, int maxBufferSize, int maxQueueSize) throws Exception{
		RemoteComputer rc = new RemoteComputer(lc);
		Address localAddr;
		lock.writeLock().lock();
		try{
			// Fix: the duplicate check and the insert must happen under the
			// same write lock. Checking under a read lock and inserting under
			// a later write lock (as before) was a check-then-act race that
			// allowed two threads to register the same computer id.
			if(computers.containsKey(rc)) throw new IllegalArgumentException("Computer with the same id is already joined the cluster: "+lc.getComputerId());
			KryoServer ks = new KryoServer(this, lc, lc.getComputerId().getPort(), maxBufferSize, maxQueueSize);
			ks.start();
			this.executionQueue = ks;
			localAddr = clusterConn.getLocalAddress();
			computers.put(rc, localAddr);
		}finally{
			lock.writeLock().unlock();
		}
		ArrayList<RemoteComputer> joined = new ArrayList<RemoteComputer>(1);
		joined.add(rc);
		handleComputerChanges(joined, null);
		//send event to notify about the new computer: count (short) followed
		//by the serialized computer and its address, matching mergeState()
		final ByteArrayOutputStream out_stream=new ByteArrayOutputStream(512);
		DataOutputStream out=new DataOutputStream(out_stream);
		out.writeShort(1);
		// serialize using the address captured under the lock instead of
		// calling write(rc,out), which would re-read the map unlocked
		rc.writeTo(out);
		Util.writeAddress(localAddr, out);
		byte[] data = out_stream.toByteArray();
		clusterConn.sendEvent(new ClusterEvent(ClusterEvent.ADD_COMPUTER, data));
	}

	// Intentionally empty: computers are registered through ADD_COMPUTER
	// events / state transfer, not through raw channel membership views.
	void membersJoined(List<Address> members){}
	
	/**
	 * Removes every computer whose JGroups address is in the given list of
	 * departed members and notifies listeners about the removals.
	 *
	 * @param members addresses that left the channel view
	 */
	void membersLeft(List<Address> members){
		ArrayList<RemoteComputer> left = new ArrayList<RemoteComputer>();
		lock.writeLock().lock();
		try{
			Iterator<RemoteComputer> it = computers.keySet().iterator();
			while(it.hasNext()){
				RemoteComputer rc=it.next();
				Address addr = computers.get(rc);
				if(members.contains(addr)) {
					it.remove(); // safe removal during iteration
					left.add(rc);
				}
			}
		}finally{
			lock.writeLock().unlock();
		}
		handleComputerChanges(null, left);
	}

	/**
	 * Merges serialized state (as produced by {@link #getState(OutputStream)})
	 * into the local map and notifies listeners about computers that were not
	 * known before. Already known computers get their address refreshed.
	 *
	 * @param state stream positioned at the unsigned-short computer count
	 * @throws Exception on deserialization failure
	 */
	void mergeState(InputStream state) throws Exception{
		DataInputStream in=new DataInputStream(state);
		final int len = in.readUnsignedShort();
		if(len==0) return;
		ArrayList<RemoteComputer> joined = new ArrayList<RemoteComputer>();
		lock.writeLock().lock();
		try{
			for(int i=0;i<len;++i){
				RemoteComputer rc=new RemoteComputer();
				rc.readFrom(in);
				Address addr = Util.readAddress(in);
				if(!computers.containsKey(rc)){
					joined.add(rc);
				}
				computers.put(rc, addr);
			}
		}finally{
			lock.writeLock().unlock();
		}
		handleComputerChanges(joined, null);
	}

	/**
	 * Updates the consistent-hash mapping and fires computerJoined /
	 * computerLeft callbacks. Listener exceptions are logged, never
	 * propagated, so one misbehaving listener cannot block the others.
	 *
	 * @param joined computers that entered the cluster, may be null or empty
	 * @param left   computers that left the cluster, may be null or empty
	 */
	void handleComputerChanges(ArrayList<RemoteComputer> joined, ArrayList<RemoteComputer> left){
		// snapshot the listeners so the callbacks run outside the lock
		ClusterListener[] currentListeners = null;
		lock.readLock().lock();
		try{
			currentListeners = listeners.toArray(new ClusterListener[listeners.size()]);
		}finally{
			lock.readLock().unlock();
		}
		if(joined!=null && joined.size()>0){
			for(RemoteComputer rc : joined){
				mapping.add(rc.getId());
				for(ClusterListener l : currentListeners){
					try{
						l.computerJoined(rc);
					} catch (Exception e) {
						log.error("computerJoined on "+l.toString(), e);
					}
				}
			}
		}
		if(left!=null && left.size()>0){
			for(RemoteComputer rc : left){
				mapping.remove(rc.getId());
				for(ClusterListener l : currentListeners){
					try{
						l.computerLeft(rc);
					} catch (Exception e) {
						log.error("computerLeft on "+l.toString(), e);
					}
				}
			}
		}
	}

	/**
	 * Serializes one computer and its address. Callers must hold {@link #lock}
	 * (read or write) because the address is looked up in {@link #computers}.
	 */
	private void write(RemoteComputer rc, DataOutputStream out) throws Exception{
		Address addr = computers.get(rc);
		rc.writeTo(out);
		Util.writeAddress(addr, out);
	}
	
	/**
	 * Writes the full cluster state (count + computer/address pairs) for
	 * state transfer; the counterpart of {@link #mergeState(InputStream)}.
	 */
	void getState(OutputStream output) throws Exception{
		DataOutputStream out=new DataOutputStream(output);
		lock.readLock().lock();
		try{
			out.writeShort(computers.size());
			Iterator<RemoteComputer> it = computers.keySet().iterator();
			while(it.hasNext()){
				RemoteComputer rc=it.next();
				write(rc, out);
			}
		}finally{
			lock.readLock().unlock();
		}
	}
	
	/**
	 * Dispatches an incoming cluster event: ADD_COMPUTER merges the carried
	 * state, EXIT stops the connection; every event is then forwarded to all
	 * registered listeners (listener failures are logged, not rethrown).
	 */
	void handleEvent(ClusterEvent event) throws Exception {
		switch(event.getType()){
		case ClusterEvent.ADD_COMPUTER:
			mergeState(new ByteArrayInputStream(event.getData()));
			break;
		case ClusterEvent.EXIT:
			clusterConn.stop();
			break;
		}
		ClusterListener[] currentListeners = null;
		lock.readLock().lock();
		try{
			currentListeners = listeners.toArray(new ClusterListener[listeners.size()]);
		}finally{
			lock.readLock().unlock();
		}
		
		for(ClusterListener l : currentListeners){
			try{
				l.handleEvent(event);
			} catch (Exception e) {
				log.error("handleEvent at "+l.toString(), e);
			}
		}
	}

	@Override
	public void addEventListener(ClusterListener listener) {
		lock.writeLock().lock();
		try{
			// remove first so a re-registered listener is stored only once
			listeners.remove(listener);
			listeners.add(listener);
		}finally{
			lock.writeLock().unlock();
		}
	}

	@Override
	public void removeEventListener(ClusterListener listener) {
		lock.writeLock().lock();
		try{
			listeners.remove(listener);
		}finally{
			lock.writeLock().unlock();
		}
	}

	@Override
	public void send(ClusterEvent event) throws Exception {
		clusterConn.sendEvent(event);
	}

	@Override
	public int getChildrenCount() {
		lock.readLock().lock();
		try{
			return computers.size();
		}finally{
			lock.readLock().unlock();
		}
	}
	
	/** @return true if a computer with the given id is currently known */
	public boolean hasChild(NodeId id){
		NodeId[] ids = getChildrenIds();
		for(NodeId child: ids) if(child.equals(id)) return true;
		return false;
	}

	@Override
	public NodeId[] getChildrenIds() {
		// copy the key set under the lock, extract the ids outside it
		RemoteComputer[] cs = null;
		lock.readLock().lock();
		try{
			cs = computers.keySet().toArray(new RemoteComputer[computers.size()]);
		}finally{
			lock.readLock().unlock();
		}
		NodeId[] res = new NodeId[cs.length];
		for(int i=0;i<cs.length;++i){
			res[i] = cs[i].getId();
		}
		return res;
	}

	@Override
	public NodeMapping getChildrenMapping() {
		return mapping;
	}

	@Override
	public NodeExecutionQueue getExecutionQueue() {
		return executionQueue;
	}

}
