package fda.join;

import java.util.LinkedList;
import java.util.List;

import junit.framework.Assert;

import fda.base.Block;
import fda.base.DiskFile2;
import fda.base.Memory2;
import fda.base.Tuple;

public class ZigZagJoinEngine {

	/** Number of blocks of memory granted to each input side. */
	private final int SIZE_50 = 50;

	private final DiskFile2 dfOut; // data file receiving the join results
	private final Block outBlock;  // in-memory buffer for joined tuples

	Memset lset, rset; // buffered cursors over the left and right input files

	/** Right-side tuples that share the most recently matched key value. */
	List<Tuple> rlist = new LinkedList<Tuple>();

	/**
	 * Creates a join engine over two input files.
	 * NOTE(review): the zig-zag traversal below presumes both inputs are
	 * sorted by key — confirm with the callers.
	 *
	 * @param df1   left input file
	 * @param df2   right input file
	 * @param dfOut data file the joined tuples are appended to
	 */
	public ZigZagJoinEngine(DiskFile2 df1, DiskFile2 df2, DiskFile2 dfOut) {
		this.dfOut = dfOut;
		this.outBlock = dfOut.getEmptyBlock();
		this.outBlock.clear();

		// grant each side a slice of memory corresponding to 50 blocks
		lset = new Memset(df1, SIZE_50);
		rset = new Memset(df2, SIZE_50);
	}

	/**
	 * Joins the two input files by walking both cursors in a zig-zag
	 * (merge) fashion, buffering right-side duplicates in {@link #rlist}
	 * so repeated left keys can be re-joined without re-reading the file.
	 */
	public void join() {
		Tuple rt = rset.next();
		// BUGFIX: guard against an empty right input — the original
		// dereferenced rt before any null check and threw an NPE.
		if (rt != null) {
			long rv = rt.getKeyValue();

			ext: while (rt != null) {
				Tuple lt = lset.next();
				if (lt == null)
					break ext;
				long lv = lt.getKeyValue();

				// left key repeats: re-join against buffered right duplicates
				if (sameInList(lv)) {
					for (Tuple t : rlist)
						joinAndAdd(lt, t);
					continue ext;
				}
				while (lt != null && rt != null) {

					if (lv == rv) {
						joinAndAdd(lt, rt);
						pushToList(rt);
					} else if (lv < rv) {
						continue ext; // left key is behind — advance left side
					}

					// read the next right tuple
					rt = rset.next();
					if (rt == null)
						break ext;
					rv = rt.getKeyValue();
					// right key changed — go back to advancing the left side
					if (!sameInList(rv))
						continue ext;
				}
			}
		}

		// flush the last, possibly partial, output block — but only when it
		// holds tuples (after a full-block flush the buffer is cleared, so the
		// original unconditional append could write an empty block)
		if (!outBlock.getRecords().isEmpty())
			dfOut.appendBlock(outBlock.getRecords());
	}

	/**
	 * Buffers a right-side tuple for possible re-joining against later
	 * duplicate left keys. The buffer only ever holds tuples of one key
	 * value; a tuple with a new key resets it.
	 *
	 * @param rt right-side tuple to buffer
	 */
	private void pushToList(Tuple rt) {
		if (!rlist.isEmpty() && rlist.get(0).getKeyValue() != rt.getKeyValue())
			rlist.clear();

		rlist.add(rt);
	}

	/**
	 * @param lv key value to check
	 * @return true when the duplicate buffer is non-empty and holds tuples
	 *         with exactly this key value
	 */
	private boolean sameInList(long lv) {
		return !rlist.isEmpty() && rlist.get(0).getKeyValue() == lv;
	}

	/**
	 * Join two tuples based on Key. Add to memory block. If block is full -
	 * write blocks to disk
	 *
	 * @param left
	 *            tuple to join
	 * @param right
	 *            tuple to join
	 */
	private void joinAndAdd(Tuple left, Tuple right) {
		Tuple joinedTuple = dfOut.getTableInfo().joinTuples(left, right);
		outBlock.addRecord(joinedTuple);
		if (outBlock.isFull()) {
			dfOut.appendBlock(outBlock.getRecords());
			outBlock.clear();
		}
	}

}




/**
 * A buffered cursor over a disk file. The internal tuple array is refilled
 * automatically from the source file whenever it runs empty, reading up to
 * {@code batchSize} blocks per refill.
 */
class Memset {
	private int pos; // read cursor inside the tuples array
	private int end; // number of valid tuples currently in the array
	private final Tuple[] tuples;
	private final DiskFile2 df;

	private int blockPos;        // index of the next block to read from the file
	private final int maxBlocks; // total number of blocks in the file
	private final int batchSize; // number of blocks loaded per refill

	/**
	 * @param diskFile  source file to read tuples from
	 * @param batchSize number of blocks loaded into memory per refill
	 */
	public Memset(DiskFile2 diskFile, int batchSize) {
		this.batchSize = batchSize;
		this.maxBlocks = diskFile.getBlockCount();
		blockPos = 0;
		df = diskFile;
		pos = 0;
		end = 0;
		// assumes Memory2 sizes the array large enough for batchSize blocks
		// worth of tuples -- TODO(review): confirm against Memory2.getMemBlock
		tuples = Memory2.getInstance().getMemBlock(batchSize, diskFile);
	}

	/**
	 * @return next tuple or NULL if EOF
	 */
	public Tuple next() {
		// refill the array once the cursor runs past the valid region
		if (pos >= end && readBatch() == 0)
			return null;
		return tuples[pos++];
	}

	/**
	 * Refills the tuple array with up to {@code batchSize} blocks of tuples
	 * from the disk file and resets the read cursor.
	 *
	 * @return number of tuples read (0 at end of file)
	 */
	private int readBatch() {
		this.pos = 0;
		int arrPosition = 0;

		for (int i = 0; i < batchSize; ++i) {

			// stop once the last block of the file has been consumed
			if (blockPos >= maxBlocks)
				break;

			// copy all tuples of the current block into the array
			List<Tuple> blockTuples = df.getBlock(blockPos++).getRecords();
			for (Tuple t : blockTuples) {
				tuples[arrPosition++] = t;
			}
		}

		// update end cursor
		this.end = arrPosition;
		return arrPosition;
	}
}