package dse;

import java.io.FileInputStream;
import java.io.IOException;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.Map;
import java.util.Properties;
import java.util.TreeMap;
import java.util.Vector;

import dse.messages.MessageQueueV2;
import dse.messages.Transaction;
import dse.messages.multiversion.GetDVSSVRTransaction;
import dse.messages.multiversion.GetDVSTransaction;
import dse.messages.multiversion.GetDhtShareWithVersion3Transaction;
import dse.messages.multiversion.GetGlobalDhtCVSTransaction;
import dse.messages.multiversion.GetORPTransaction;
import dse.messages.multiversion.GetQuorumShareTransaction;
import dse.messages.multiversion.GetTLATransaction;
import dse.messages.multiversion.GetTwoLevelDhtShareTransaction;
import dse.messages.multiversion.GetUVNSVRTransaction;
import dse.messages.multiversion.GetUVNTransaction;
import dse.messages.multiversion.GetVNFTransaction;
import dse.messages.multiversion.PasisSVRTransaction;
import dse.messages.multiversion.PasisTransaction;
import dse.messages.multiversion.RepErasureTransaction;
import dse.messages.singleversion.GetAVIDTransaction;
import dse.messages.singleversion.GetDataObjectSharesNoCacheTransaction;
import dse.messages.singleversion.GetDataObjectSharesTransaction;
import dse.messages.singleversion.GetDhtDataObjectSharesTransaction;
import dse.messages.singleversion.GetHen07Transaction;
import dse.messages.singleversion.GetNSRTransaction;
import dse.messages.singleversion.SingleVersionTransaction;
/*import dse.messages.bloomfilter.BloomFilterRefreshTransaction;
import dse.messages.cachingobj.CacheDataObjectNoSharesTransaction;
import dse.messages.cachingshares.CacheDataObjectWithSharesTransaction;
import dse.messages.cachingshares.UpdateSuperNodeWithRecentRequestsTransaction;
import dse.messages.dataobj.GetDataObjectDhtOnlySchemeTransaction;
import dse.messages.dataobj.GetDataObjectMixedSchemeTransaction;*/

/**
 * This class runs the experiment.  Hence its name :)
 * 
 * @author Sam
 */
public class Experiment {
	/**
	 * This is used to make sure nodes that had a transaction within this number
	 * of periods are not set to a down state.  This is just useful to make 
	 * nodes that have made data transactions stay up a little bit longer.  
	 * Otherwise more transactions would fail.  And it makes sense that if a 
	 * data transaction was just initiated, the node would want to stay up!
	 */
	private static final int NumberOfPeriodsConsideredRecent = 100;
	/** Extra periods to run after request generation stops, to let in-flight
	 *  transactions drain.  Currently zero, i.e. disabled. */
	public static final int AdditionalPeriodsToFinish = 0;
		
	// Configuration and pluggable behavior modules for this run.
	protected ExperimentConfiguration config;
	protected Modules modules;
	// The simulated network; loaded from topologyFile in startExperiment().
	protected Network network;
	protected String topologyFile;
	
	// Global message queue, processed once per period in runPeriod().
	protected MessageQueueV2 messageQueue;
	// All data objects generated for this experiment.
	protected Vector<DataObject> dataObjects;
	// Per-cluster access rankings consumed by the request scheduler.
	protected Map<Cluster, DataObjectAccessRanking> accessRankings;
	// Simulated time, advanced by one period length per period.
	protected double experimentTime;
	// Transactions currently in flight; finished ones are pruned in runPeriod().
	public LinkedList<Transaction> transactions;
	// Maps a node to the period of its most recent data transaction; used to
	// keep busy nodes up (see NumberOfPeriodsConsideredRecent).
	public Map<Node,Integer> nodesInRecentDataTransactionsList;
	public Node superNode;
	// Transactions recently started, keyed by data object; used to count
	// concurrent update/read overlaps and pruned each period in runPeriod().
	protected Hashtable<DataObject, Vector<Transaction>> recentTransactions;
	// Counters for concurrent update-update and update-read occurrences.
	protected int concurrentUpdateTimes;
	protected int concurrentUpdateReadTimes;
	/**
	 * This vector maps from Nodes (using their id-1 as the index) to a pair of
	 * integers. The first element of the pair indicates the period when their
	 * last bandwidth share was allocated, the second element indicates how much
	 * share they have left for that period.
	 * NOTE(review): never initialized anywhere in this file (the setup code in
	 * startExperiment is commented out), so the bandwidth methods below would
	 * throw NullPointerException if called — confirm whether bandwidth
	 * accounting is intentionally disabled.
	 */
	protected Vector<Pair<Integer, Integer>> nodeBandwidthAllocation;
	// Monotonic id generators for messages and transactions.
	protected int lastMessageId;
	protected int lastTransId;
	/** The current period. Starts counting at 1. */
	//protected int currentPeriod;
	public static int currentPeriod;
	// Nodes with pending queued messages (lazily created; currently only
	// consumed by the commented-out handleNodeMessageQueue).
	private LinkedList<Node> nodesWithMessage;
	
	//private int[] accessPattern;
	
	/**
	 * Creates a new experiment driven by the given configuration and module
	 * set.  All counters and bookkeeping collections start out empty; the
	 * network itself is only built later, in startExperiment().
	 * 
	 * @param config  the experiment configuration
	 * @param modules the pluggable modules used by the simulation
	 */
	public Experiment (ExperimentConfiguration config, Modules modules) {
		this.config = config;
		this.modules = modules;

		experimentTime = 0;
		lastMessageId = 0;
		lastTransId = 0;
		transactions = new LinkedList<Transaction>();
		currentPeriod = -1;
		nodesInRecentDataTransactionsList = new Hashtable<Node, Integer>();
		nodesWithMessage = null;
		recentTransactions = new Hashtable<DataObject, Vector<Transaction>>();
		concurrentUpdateReadTimes = 0;
		concurrentUpdateTimes = 0;
	}
	
	/** Returns the configuration this experiment was constructed with. */
	public ExperimentConfiguration configuration () {
		return config;
	}
	
	/** Returns the data objects generated for this experiment (null before
	 *  startExperiment() has run). */
	public Vector<DataObject> dataObjects () {
		return dataObjects;
	}
	
	/**
	 * Records that the given node has a queued message, creating the backing
	 * list lazily and ignoring duplicates.
	 * 
	 * @param node the node to register
	 */
	public void addNodeWithMessage(Node node)
	{
		if (nodesWithMessage == null)
			nodesWithMessage = new LinkedList<Node>();
		if (nodesWithMessage.contains(node))
			return;
		nodesWithMessage.add(node);
	}

	/**
	 * This decrements the remaining bandwidth a node has by the specified
	 * number of bytes.  The lazy per-period reallocation is delegated to
	 * {@link #getRemainingNodeBandwidth(Node)} so the two methods cannot
	 * drift apart (they previously duplicated the same reset logic).
	 * 
	 * NOTE(review): nodeBandwidthAllocation is never initialized anywhere in
	 * this file (the setup code in startExperiment is commented out), so this
	 * throws NullPointerException if ever called — confirm whether bandwidth
	 * accounting is intentionally disabled.
	 * 
	 * @param node  the node whose allocation is charged
	 * @param bytes the number of bytes to subtract; may drive the remaining
	 *              share negative, exactly as before
	 */
	public void decrementRemainingNodeBandwidth (Node node, int bytes) {
		// Ensure this period's share has been (re)allocated, then charge it.
		getRemainingNodeBandwidth(node);
		Pair<Integer, Integer> b = nodeBandwidthAllocation.get(node.id() - 1);
		b.second = b.second - bytes;
	}

	/** Returns the current simulation period (also exposed via the static
	 *  field {@link #currentPeriod}). */
	public int getCurrentPeriod()
	{
		return currentPeriod;
	}
	
	/** Returns the simulated time accumulated so far (period count times
	 *  period length; advanced at the end of each runPeriod()). */
	public double getCurrentTime () {
		return experimentTime;
	}

	/** Hands out the next unused message identifier (ids start at 1). */
	public int getNewMessageId () {
		return ++lastMessageId;
	}

	/** Hands out the next unused transaction identifier (ids start at 1). */
	public int getNewTransactionId () {
		return ++lastTransId;
	}

	/**
	 * This should get the upload/download bandwidth remaining in this period
	 * for the passed node. The bandwidth should be the number of bytes that it
	 * can still send.
	 * 
	 * Allocation is lazy: the first query for a node in a new period resets
	 * its share to the node's configured limit before returning it.
	 * 
	 * NOTE(review): nodeBandwidthAllocation is never initialized anywhere in
	 * this file (the setup code in startExperiment is commented out), so this
	 * throws NullPointerException if ever called — confirm whether bandwidth
	 * accounting is intentionally disabled.
	 * 
	 * @param node the node whose remaining share is queried
	 * @return the number of bytes the node may still transfer this period
	 */
	public int getRemainingNodeBandwidth (Node node) {
		Pair<Integer, Integer> b = nodeBandwidthAllocation.get(node.id() - 1);
		// Entry is stale (from an earlier period): re-grant the full share.
		if (b.first != currentPeriod) {
			// Otherwise reallocate bandwidth to the node and return it
			b.first = currentPeriod;
			b.second = node.bandwidthLimit();
		}
		return b.second;
	}

	/**
	 * Bloom-filter refresh handling is disabled in this build: the entire
	 * implementation is commented out below (as is its supporting import),
	 * so this method is currently a no-op.  Kept for reference.
	 * 
	 * @param recorder unused while the body is disabled
	 */
	protected void handleNewBloomFilterRefreshes (ExperimentRecorder recorder) {
		/*if (modules.bloomFilterRefreshDecisionMaker() == null)
			return;

		List<Pair<Cluster, Cluster>> c2up = modules
				.bloomFilterRefreshDecisionMaker()
				.getClustersToRefresh(network);

		for (Pair<Cluster, Cluster> p : c2up) {
			Cluster from = p.first;
			Cluster to = p.second;

			int transId = this.getNewTransactionId();
			assert from.superNode().id() != to.superNode().id();

			Transaction trans = new BloomFilterRefreshTransaction(this,
					transId, from.superNode(), to.superNode());

			transactions.add(trans);

			// Start trans...
			trans.start(recorder);
		}*/
	}

	/**
	 * Caching-request handling is disabled in this build: the entire
	 * implementation is commented out below (as are its supporting imports),
	 * so this method is currently a no-op.  Kept for reference.
	 * 
	 * @param recorder              unused while the body is disabled
	 * @param requestsForThisPeriod unused while the body is disabled
	 */
	protected void handleNewCachingRequests (ExperimentRecorder recorder,
			Map<Node, DataObject> requestsForThisPeriod) {
		/*if (modules.cacheDecisionMaker() == null)
			return;

		Map<Cluster, List<DataObject>> reqs = modules.cacheDecisionMaker()
				.makeCachingDecisions(network, requestsForThisPeriod);

		for (Cluster c : reqs.keySet()) {
			for (DataObject d : reqs.get(c)) {

				int transId = this.getNewTransactionId();

				Transaction trans = null;
				if (config.getTestType() == TestType.Mixed)
					trans = new CacheDataObjectWithSharesTransaction(
							this, transId, c.superNode(), d);
				else if (config.getTestType() == TestType.MixedWithoutShares)
					trans = new CacheDataObjectNoSharesTransaction(
							this, transId, c.superNode(), d);

				transactions.add(trans);

				// Start trans...
				trans.start(recorder);
			}
		}*/
	}

	/**
	 * Pulls this period's data requests from the request scheduler, wraps each
	 * one in the transaction type matching the configured test type, records
	 * the requesting node as "recently active" (so it is not taken down), and
	 * maintains the per-object concurrency counters before starting each
	 * transaction.
	 * 
	 * NOTE(review): if the test type matches none of the branches below (e.g.
	 * MixedWithoutShares or DhtOnly, whose branches are commented out), trans
	 * stays null and trans.primaryNode() throws NullPointerException — confirm
	 * those test types are never combined with a request scheduler.
	 * 
	 * @param recorder the recorder new transactions report to
	 * @return always null in the current implementation
	 */
	protected Map<Node, DataObject> handleNewRequests (
			ExperimentRecorder recorder) {
		if (modules.requestEventScheduler() == null)
			return null;

		// Requests keyed by their scheduled time within the period.
		TreeMap<Double, Pair<Node, DataObject>> requests = modules.requestEventScheduler()
				.getRequests(this, accessRankings);
		
		if(requests == null)
			return null;


		for (Double time : requests.keySet()) {
			// nd.first = requesting node, nd.second = requested data object.
			Pair<Node, DataObject> nd = requests.get(time);
			
			//this.accessPattern[nd.second.id()-1]++;
			
			int transId = this.getNewTransactionId();
			Transaction trans = null;

			// Dispatch on the configured test type to pick the protocol
			// implementation for this request.
			if (config.getTestType() == TestType.Mixed)
				trans = new GetDataObjectSharesTransaction(this, transId, nd.first, nd.second);
			/*else if (config.getTestType() == TestType.MixedWithoutShares)
				trans = new GetDataObjectMixedSchemeTransaction(
						this, transId, n, dobj);
			else if (config.getTestType() == TestType.DhtOnly)
				trans = new GetDataObjectDhtOnlySchemeTransaction(
						this, transId, n, dobj);*/
			else if(config.getTestType() == TestType.DhtWithShares)
				trans = new GetDhtDataObjectSharesTransaction(this, transId, nd.first, nd.second);
			else if(config.getTestType() == TestType.ClusterWithShares)
				trans = new GetDataObjectSharesNoCacheTransaction(this, transId, nd.first, nd.second);
			else if(config.getTestType() == TestType.DVS)
				trans = new GetDVSTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.UVN)
				trans = new GetUVNTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.DhtShareWithVersion3)
				trans = new GetDhtShareWithVersion3Transaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.TwoLevelDhtShare)
				trans = new GetTwoLevelDhtShareTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.QuorumShare)
				trans = new GetQuorumShareTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.SingleVersion)
				trans = new SingleVersionTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.VNF)
				trans = new GetVNFTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.ORP)
				trans = new GetORPTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.DhtShareRep)
				trans = new RepErasureTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.DhtPasis)
				trans = new PasisTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.TLA)
				trans = new GetTLATransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.GDhtCVS)
				trans = new GetGlobalDhtCVSTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.NSR)
				trans = new GetNSRTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.AVID)
				trans = new GetAVIDTransaction(this, transId, nd.first, nd.second, time);
			else if(config.getTestType() == TestType.HEN07)
				trans = new GetHen07Transaction(this, transId, nd.first, nd.second, time);
			
			transactions.add(trans);
			// Remember that this node just started a data transaction so
			// handleNodeStateChanges will not take it down for a while.
			nodesInRecentDataTransactionsList.put(
					trans.primaryNode(), currentPeriod
					);
			if(this.recentTransactions.get(nd.second) == null)
			{
				// First transaction seen for this object: nothing to overlap
				// with, so no concurrency counters are bumped.
				this.recentTransactions.put(nd.second, new Vector<Transaction>());
				this.recentTransactions.get(nd.second).add(trans);
			}
			else
			{
				// Scan the object's outstanding transactions once to learn
				// whether any reads and/or updates are already in flight.
				boolean isRead = false;
				boolean isUpdate = false;
				for(Transaction trx : this.recentTransactions.get(nd.second))
				{
					if(trx.isUpdate())
						isUpdate = true;
					else
						isRead = true;
					
					if(isUpdate && isRead)
						break;
				}
				// Count update/update and update/read overlaps caused by the
				// new transaction.
				if(trans.isUpdate())
				{
					if(isRead)
						this.concurrentUpdateReadTimes++;
					if(isUpdate)
						this.concurrentUpdateTimes++;
				}
				else if(isUpdate)
					this.concurrentUpdateReadTimes++;
				this.recentTransactions.get(nd.second).add(trans);
			}

			// Start trans...
			trans.start(recorder);
		}

		return null;
	}
	
	/**
	 * Issues the share-version-repair (SVR) transactions for the protocols
	 * that use them — DVS, UVN and DhtPasis.  A no-op for every other test
	 * type or when no request scheduler is configured.
	 * 
	 * @param recorder the recorder the repair transactions report to
	 */
	protected void handleSVRRequests (ExperimentRecorder recorder) {
		TestType type = this.config.getTestType();
		boolean usesSvr = type == TestType.DVS
				|| type == TestType.UVN
				|| type == TestType.DhtPasis;
		if (!usesSvr || modules.requestEventScheduler() == null)
			return;

		TreeMap<Node, DataObject> requests =
				modules.requestEventScheduler().getSVRRequests(this);
		if (requests == null)
			return;

		for (Map.Entry<Node, DataObject> req : requests.entrySet()) {
			Node node = req.getKey();
			DataObject dobj = req.getValue();
			int transId = getNewTransactionId();

			Transaction trans;
			if (type == TestType.DVS)
				trans = new GetDVSSVRTransaction(this, transId, node, dobj, currentTime());
			else if (type == TestType.UVN)
				trans = new GetUVNSVRTransaction(this, transId, node, dobj, currentTime());
			else // the guard above leaves only DhtPasis here
				trans = new PasisSVRTransaction(this, transId, node, dobj, currentTime());

			// Repair transactions are intentionally not added to the global
			// transactions list (matches the original behavior).
			trans.start(recorder);
		}
	}
	
	/**
	 * Super-node cache-update handling is disabled in this build: the entire
	 * implementation is commented out below (as is its supporting import),
	 * so this method is currently a no-op.  Kept for reference.
	 * 
	 * @param recorder unused while the body is disabled
	 */
	protected void handleNewUpdateRequestsForCachingMessages (
			ExperimentRecorder recorder) {
		/*if (modules.cachingRequestUpdateScheduler() == null)
			return;

		List<Node> nodes = modules.cachingRequestUpdateScheduler().getRequests(
				network);

		for (Node n : nodes) {
			Node from = n;
			Node to = network.lookupCluster(n.clusterId()).superNode();

			assert from.id() != to.id();

			int transId = this.getNewTransactionId();

			Transaction trans = new UpdateSuperNodeWithRecentRequestsTransaction(
					this, transId, from, to);

			transactions.add(trans);

			// Start trans...
			trans.start(recorder);
		}*/
	}
	
	/*protected void handleNodeMessageQueue(ExperimentRecorder recorder)
	{
		if(nodesWithMessage == null || nodesWithMessage.isEmpty())
			return;
		for(Node node : nodesWithMessage)
		{
			if(node.isUp())
			{
				node.processQueue(this, recorder);
			}
		}
	}*/
	
	/**
	 * Applies the node up/down events produced by the node event scheduler.
	 * Nodes are brought up unconditionally.  A node is only taken down when
	 * it has NOT recently initiated a data transaction: taking down nodes
	 * mid-transaction made differently-timed experiments (e.g. DHT-only vs.
	 * mixed) diverge, because a node could be down in one run and up in the
	 * other and subsequent requests to it would be dropped.  The recent-
	 * transaction list is maintained identically across experiments, so
	 * filtering on it keeps the down-sets comparable.  If a node going down
	 * is still mid-transaction, its transactions are aborted first.
	 * 
	 * @param recorder used for debug logging of state flips
	 */
	protected void handleNodeStateChanges (ExperimentRecorder recorder) {
		Map<Node, NodeState> newStates = modules.nodeEventScheduler()
				.getEvents(network);

		for (Map.Entry<Node, NodeState> event : newStates.entrySet()) {
			Node n = event.getKey();
			NodeState ns = event.getValue();

			if (ns == NodeState.Up) {
				n.setState(ns);
				recorder.debug("Node " + n + ": State Down --> Up");
				continue;
			}

			// Sanity check: the scheduler only emits Up or Down.
			assert ns == NodeState.Down;

			// Recently-active nodes are kept up (see the class comment above).
			if (isInRecentTransaction(n))
				continue;

			if (isInMiddleOfSomeTransaction(n))
				removeTransactions(n, recorder);

			n.setState(ns);
			recorder.debug("Node " + n + ": State Up --> Down");
		}
	}

	/**
	 * Places every generated data object onto the network according to the
	 * placement scheme implied by the configured test type: the share-based
	 * DHT schemes spread shares network-wide, DhtOnly replicates whole
	 * objects on DHT-selected nodes, and the remaining schemes place data
	 * per cluster.
	 * 
	 * @param recorder unused; kept for signature symmetry with the other
	 *                 setup helpers
	 */
	private void hostDataObjects (ExperimentRecorder recorder) {
		for (DataObject dobj : dataObjects) {
			switch (config.getTestType()) {
			case DhtWithShares:
			case DVS:
			case UVN:
			case DhtShareWithVersion3:
			case QuorumShare:
			case SingleVersion:
			case VNF:
			case ORP:
			case DhtShareRep:
			case DhtPasis:
			case GDhtCVS:
			case NSR:
			case AVID:
			case HEN07:
				// Share-based DHT schemes: spread shares across the network.
				modules.assigner().hostShares(network, dobj);
				break;
			case DhtOnly:
				// Whole-object replication on the nodes the DHT selects.
				for (Node node : modules.assigner().assign(dobj, network))
					node.host(dobj);
				break;
			default:
				hostInAssignedClusters(dobj);
				break;
			}
		}
	}

	/** Cluster-level placement used by the non-DHT test types: shares for the
	 *  share-based cluster schemes, whole objects for MixedWithoutShares. */
	private void hostInAssignedClusters (DataObject dobj) {
		Iterable<Cluster> hosts = modules.assigner().assign(dobj,
				network.clusters(), network.maxClusterId());
		for (Cluster host : hosts) {
			TestType type = config.getTestType();
			if (type == TestType.Mixed
					|| type == TestType.ClusterWithShares
					|| type == TestType.TwoLevelDhtShare
					|| type == TestType.TLA)
				modules.assigner().hostShares(host, dobj);
			else if (type == TestType.MixedWithoutShares)
				host.host(dobj);
			// Other test types place nothing at the cluster level.
		}
	}

	/**
	 * Reports whether the given node is the primary node of any in-flight
	 * transaction.  Nodes are compared by object identity, matching how node
	 * instances are shared throughout the simulation.
	 */
	private boolean isInMiddleOfSomeTransaction (Node node) {
		for (Transaction active : transactions)
			if (active.primaryNode() == node)
				return true;
		return false;
	}
	
	/**
	 * Reports whether the node initiated a data transaction within the last
	 * NumberOfPeriodsConsideredRecent periods.
	 */
	private boolean isInRecentTransaction (Node node) {
		Integer lastActive = nodesInRecentDataTransactionsList.get(node);
		if (lastActive == null)
			return false;
		int cutoff = currentPeriod - NumberOfPeriodsConsideredRecent;
		return lastActive > cutoff;
	}

	/** Returns the global message queue (null before startExperiment()). */
	public MessageQueueV2 messageQueue () {
		return messageQueue;
	}

	/** Returns the module set this experiment was constructed with. */
	public Modules modules () {
		return modules;
	}

	/** Returns the simulated network (null before startExperiment()). */
	public Network network () {
		return network;
	}

	/**
	 * This actual only aborts data transactions, cause thats the only kind we
	 * really care about (the transactions list only ever holds data
	 * transactions).  Every in-flight transaction whose primary node is n is
	 * dropped from the list.
	 * 
	 * @param n        the node being taken down
	 * @param recorder unused here; kept for signature symmetry
	 */
	private void removeTransactions (Node n, ExperimentRecorder recorder) {
		for (Iterator<Transaction> it = transactions.iterator(); it.hasNext(); ) {
			if (it.next().primaryNode() == n)
				it.remove();
		}
	}

	/**
	 * Runs one simulation period: applies node state changes, generates new
	 * data requests, issues SVR repairs for the protocols that need them,
	 * drains the message queue, advances time, and prunes bookkeeping.
	 * The statement order here is load-bearing — requests must be generated
	 * before the queue is processed, and pruning must run last.
	 * 
	 * @param recorder the recorder all period activity reports to
	 */
	public void runPeriod (ExperimentRecorder recorder) {
		currentPeriod++;

		recorder.nextPeriod(experimentTime, config.getTimePeriodLength());

		//handleNodeMessageQueue(recorder);
		// Handle the node event scheduler... for the most part :)
		// NOTE(review): state changes are only applied during period 1, so
		// the initial states set in startExperiment persist afterwards —
		// confirm this is intentional and not a leftover from debugging.
		if(currentPeriod == 1)
			handleNodeStateChanges(recorder);
		
		// Decide what new requests need to be initiated...
		// (request generation stops AdditionalPeriodsToFinish periods early
		// so in-flight transactions can drain; currently that constant is 0)
		if(currentPeriod <= (config.getNumberOfPeriodsToRun()-Experiment.AdditionalPeriodsToFinish))
		{
			handleNewRequests(recorder);
		}
		
		if(config.getTestType() == TestType.DhtPasis ||
				config.getTestType() == TestType.DVS ||
				config.getTestType() == TestType.UVN){
			handleSVRRequests(recorder);
		}

		// And then mostly we just have to process all messages in the queue.
		messageQueue.process(this, recorder);
		// Increment time
		experimentTime += config.getTimePeriodLength();

		// Remove finished transactions, and drop every transaction from the
		// per-object recentTransactions vectors so concurrency overlaps are
		// only counted against transactions started since the last period.
		Iterator<Transaction> ti = transactions.iterator();
		while (ti.hasNext())
		{
			Transaction trx = ti.next();
			// A transaction appears in at most one vector, hence the break.
			for(DataObject dobj : this.recentTransactions.keySet())
			{
				if(this.recentTransactions.get(dobj).contains(trx)){
					this.recentTransactions.get(dobj).remove(trx);
					break;
				}
			}
			if (trx.isFinished())
			{
				ti.remove();
			}
		}
		// The multi-version protocols report their space cost every period.
		if(this.config.getTestType() == TestType.DVS || this.config.getTestType() == TestType.UVN || this.config.getTestType() == TestType.DhtPasis)
			spaceCost();
	}
	
	/**
	 * One-time experiment setup: loads the network topology from
	 * topologyFile, designates the highest-id node as the super node,
	 * generates and hosts the data objects, applies initial node states,
	 * builds the access rankings, and creates the message queue.  Must be
	 * called exactly once before the first runPeriod().
	 * 
	 * @param recorder the recorder setup activity reports to
	 * @throws DseException if any module fails during setup
	 */
	public void startExperiment (ExperimentRecorder recorder)
	throws DseException
	{
		currentPeriod = 0;
		
		/*for(int i=0; i<100; i++){
			network = modules.networkTopologyGenerator().createNetwork();
			modules.clusterGenerator().generateClusters(network);
			modules.bandwidthAssigner().assignBandwidths(network);
			String file = "D:\\java\\dse\\topology\\network" + i + ".dat";
			//String file = "D:\\java\\dse\\topology\\network.dat";
			IOUtil.NetworkToFile((EarthLikeNetwork)network, file);
		}*/
		/*if(System.getProperty("os.name").startsWith("Windows"))
			network = IOUtil.NetworkFromFile("d:\\java\\dse\\network.dat");
		else
			network = IOUtil.NetworkFromFile("/d/java/dse/network.dat");*/
		network = IOUtil.NetworkFromFile(topologyFile);
		// The node with the highest id is designated the super node.
		superNode = network.lookupNode(network.maxNodeId());
		superNode.setIsSupernode(true);
		dataObjects = modules.dataObjectsGenerator().generateDataObjects();
		//nodeBandwidthAllocation = new Vector<Pair<Integer, Integer>>(network.maxNodeId());

		// Make sure all nodes are set to their default UP and while we are at
		// it setup their bandwidth information
		// NOTE(review): the bandwidth setup below is commented out, leaving
		// nodeBandwidthAllocation null — confirm bandwidth accounting is
		// intentionally disabled.
		for (Node n : network.nodes()) {
			n.setState(NodeState.Up);
			/*while (nodeBandwidthAllocation.size() < n.id())
				nodeBandwidthAllocation.add(null);
			nodeBandwidthAllocation.set(n.id() - 1, new Pair<Integer, Integer>(
					-1, n.bandwidthLimit()));*/
		}

		hostDataObjects(recorder);

		// Now we have to initalize bloom filters (disabled in this build)
		/*for (Cluster c : network.clusters()) {
			c.setBloomFilter(modules.bloomFilterGenerator()
					.generateBloomFilterForCachedDataObjects(c));
		}*/

		// Now set some nodes to down...
		if (modules.nodeEventScheduler() != null)
			modules.nodeEventScheduler().setInitalStates(network);

		accessRankings = modules.dataObjectAccessRanker()
				.generateAccessRankings(network, dataObjects);
		//IOUtil.AccessRankingsToFile(accessRankings, "accessRankings.dat");
		//accessRankings = IOUtil.AccessRankingsFromFile("accessRankings.dat");
		messageQueue = new MessageQueueV2();

		if (modules.requestEventScheduler() == null)
			recorder
					.warn("No node scheduler module was found. No data requests "
							+ "will be made.");
	}
	
	
	
	/**
	 * Command-line entry point: expects a configuration file and a topology
	 * file, then runs a single experiment.
	 * 
	 * @param args {config-file, topology-file}
	 * @throws Exception if loading the configuration or running the
	 *                   experiment fails
	 */
	public static void main (String [] args) 
	throws Exception
	{
		if (args.length == 2) {
			runTest(args[0], args[1]);
			return;
		}
		System.out.println(
				"Usage: java -cp dse.jar dse.Experiment " +
				"<config-file> <topology-file>"
				);
		System.exit(-1);
	}
	
	/**
	 * Builds the experiment from the given properties, configures the
	 * protocol-wide DataShare statics, runs every period, and prints the
	 * collected results plus the space-cost summaries.
	 * 
	 * NOTE(review): modules.nodeEventScheduler() is dereferenced here without
	 * a null check, although startExperiment guards against it being null —
	 * confirm the scheduler is mandatory for all configurations used with
	 * this entry point.
	 * 
	 * @param props        the experiment configuration properties
	 * @param topologyFile path of the serialized network topology
	 * @throws DseException if experiment setup or execution fails
	 */
	public static void runTest (Properties props, String topologyFile)
	throws DseException
	{
		ExperimentConfiguration conf = new ExperimentConfiguration(props);
		Modules modules = new Modules (conf, props);
		
		Experiment test = new Experiment (conf, modules);
		test.topologyFile = topologyFile;
		
		ExperimentRecorder recorder = new StandardExperimentRecorder(
				PropertiesUtil.collectSubset(props, "recorder.")				
				);
		recorder.setExperiment(test);

		System.out.println("STARTING EXPERIMENT");

		// Protocol-specific share ordering and failure rate.
		if(test.config.getTestType() == TestType.UVN ||
				test.config.getTestType() == TestType.TwoLevelDhtShare ||
				test.config.getTestType() == TestType.SingleVersion)
			DataShare.order = 0;
		else
			DataShare.order = 1;
		DataShare.failure = test.modules.nodeEventScheduler().getEventsPerPeriod();
		
		test.startExperiment(recorder);
		// Wall-clock timing of the simulation loop, reported with results.
		long startTime = System.currentTimeMillis();
		test.spaceCost();
		for (int a = 0; a < conf.getNumberOfPeriodsToRun(); a++) {
			//System.out.println("PERIOD #" + (a+1));		
			test.runPeriod(recorder);
		}
		long endTime = System.currentTimeMillis();
		System.out.println("DONE");
		System.out.println("");
		
		// Only the DhtWithShares scheme tracks a request queue size.
		int queueSize = 0;
		if(test.config.getTestType() == TestType.DhtWithShares)
			queueSize = test.queueSize();
		
		ExperimentResults results = recorder.getResults();
		//double[] spaceCost = test.spaceCost();
		results.setQueueSize(queueSize);
		//results.setSpaceCost(spaceCost);
		results.setTotalConcurrentTimes(test.concurrentUpdateTimes, test.concurrentUpdateReadTimes);
		System.out.println(results.toString(
				test.config.getTestType(),
				test.network, 
				test.dataObjects,
				((double)(endTime - startTime))/1000
				));
		test.avgSpace();
		test.maxSpace();
		/*Hashtable<Integer, Integer> topTen = new Hashtable<Integer, Integer>();
		for(int k=0; k<10; k++){
			int max = 0;
			int maxi = 0;
			for(int i=0; i<10000; i++){
				if(test.accessPattern[i] > max){
					max = test.accessPattern[i];
					maxi = i;
				}
			}
			topTen.put(maxi, max);
			test.accessPattern[maxi] = 0;
		}
		System.out.println("Top Ten accesses:");
		for(Integer i : topTen.keySet())
		{
			System.out.println(i.intValue() + " : " + topTen.get(i));
		}*/
	}
	
	/**
	 * Loads the experiment properties from the given file, preprocesses
	 * them, and runs the test.
	 * 
	 * @param propertiesFile path of the Java properties file to load
	 * @param topologyFile   path of the serialized network topology
	 * @throws DseException if experiment setup or execution fails
	 * @throws IOException  if the properties file cannot be read
	 */
	public static void runTest (String propertiesFile, String topologyFile)
	throws DseException, IOException
	{
		Properties props = new Properties ();
		
		FileInputStream fis = null;
		try {
			fis = new FileInputStream (propertiesFile);
			props.load(fis);
		} finally {
			// Bug fix: if the FileInputStream constructor threw (e.g. file
			// not found), fis was still null and the old unconditional
			// fis.close() raised a NullPointerException that masked the
			// original IOException.
			if (fis != null)
				fis.close();
		}
		
		PropertiesUtil.preprocessProperties(props);
		
		runTest(props, topologyFile);
	}
	/**
	 * Prints one line of per-period space-cost statistics for the
	 * multi-version protocols: the current period, the average per-object
	 * hosted size (in KB, dividing by 1000), and the maximum per-object size.
	 * 
	 * NOTE(review): share sizes are taken from dataObjects.get(0).shareSize()
	 * for every object — assumes all objects have the same share size;
	 * confirm.  Also assumes the host-share table keys are 1-based
	 * data-object ids (hence the i-1 index) — verify against Node.
	 */
	private void spaceCost()
	{
		// Accumulated hosted size per data object, indexed by object id - 1.
		double[] size = new double[this.dataObjects.size()];
		for(Node node : network.nodes())
		{
			if(node.getHostShares() != null)
			{
				Hashtable<Integer, DataShare> shares = node.getHostShares();
				for(Integer i : shares.keySet())
				{
					DataShare share = shares.get(i);
					size[i-1] += share.getVersionSize()*dataObjects.get(0).shareSize();
				}
			}
		}
		double max = 0;
		double sum = 0;
		for(int i=0; i<size.length; i++){
			if(max < size[i])
				max = size[i];
			sum += size[i];
		}
		// Tab-separated: period, average size (KB), max size (KB).
		System.out.print(currentPeriod + "\t");
		System.out.print(sum*1.0/1000/size.length + "\t");
		System.out.println(max/1000);
	}
	
	/** Returns the simulated time at the START of the current period, i.e.
	 *  (currentPeriod - 1) period lengths — unlike getCurrentTime(), which
	 *  returns the accumulated experimentTime. */
	public double currentTime()
	{
		return (currentPeriod-1)*config.getTimePeriodLength();
	}
	
	/** Returns the global queued-request counter accumulated in Node. */
	public int queueSize(){
		return Node.queuedRequestSize;
	}
	
	/**
	 * Prints the mean and the maximum of the per-object average space cost,
	 * computed from the global counters accumulated in DataShare.  Fixes the
	 * "Sapce" typo in the output labels, sizes the loop by the array length
	 * instead of a hard-coded 10000, and skips objects with a zero sample
	 * count, which previously produced NaN (0/0) and poisoned the sum.
	 * 
	 * NOTE(review): assumes DataShare.size and DataShare.count are parallel
	 * arrays indexed by data-object id — confirm against DataShare.
	 */
	public void avgSpace(){
		int n = DataShare.size.length;
		double sum = 0;
		double avgMax = 0;
		for (int i = 0; i < n; i++) {
			if (DataShare.count[i] == 0)
				continue; // no samples recorded: skip instead of adding NaN
			double avg = DataShare.size[i] * 1.0 / DataShare.count[i];
			sum += avg;
			if (avgMax < avg)
				avgMax = avg;
		}
		System.out.println("Avg Space Cost: " + sum / n);
		System.out.println("Avg Max Space Cost: " + avgMax);
	}
	/**
	 * Prints the mean and the overall maximum of the per-object maximum
	 * space cost recorded in DataShare.maxSize.  Fixes the "Sapce" typo in
	 * the output labels and sizes the loop by the array length instead of a
	 * hard-coded 10000.
	 */
	public void maxSpace(){
		int n = DataShare.maxSize.length;
		double sum = 0;
		double max = 0;
		for (int i = 0; i < n; i++) {
			sum += DataShare.maxSize[i];
			if (max < DataShare.maxSize[i])
				max = DataShare.maxSize[i];
		}
		System.out.println("Avg Max Space Cost: " + sum / n);
		System.out.println("Max Max Space Cost: " + max);
	}
}